Bug 1799402 - Vendor wpf-gpu-raster Rust crate. r=jrmuizel,supply-chain-reviewers

This adds the wpf-gpu-raster Rust crate so we can use it for the accelerated
Canvas2D project.

Differential Revision: https://phabricator.services.mozilla.com/D161477
Lee Salzman 2022-11-12 08:06:43 +00:00
parent 2b52180aad
commit cc4019f675
41 changed files with 11620 additions and 0 deletions

@@ -102,6 +102,11 @@ git = "https://github.com/chris-zen/coremidi.git"
replace-with = "vendored-sources"
rev = "fc68464b5445caf111e41f643a2e69ccce0b4f83"
[source."https://github.com/FirefoxGraphics/wpf-gpu-raster"]
git = "https://github.com/FirefoxGraphics/wpf-gpu-raster"
replace-with = "vendored-sources"
rev = "f0d95ce14af8a8de74f469dbad715c4064fca2e1"
[source.crates-io]
replace-with = "vendored-sources"

Cargo.lock generated
@@ -2204,6 +2204,7 @@ dependencies = [
"webext_storage_bridge",
"webrender_bindings",
"wgpu_bindings",
"wpf-gpu-raster",
"xpcom",
"xulstore",
]
@@ -5556,6 +5557,12 @@ dependencies = [
"rustc-hash",
]
[[package]]
name = "typed-arena-nomut"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfc9d8d4e8c94375df96d6ac01a18c263d3d529bc4a53a207580ae9bc30e87c1"
[[package]]
name = "typenum"
version = "1.15.0"
@@ -6363,6 +6370,14 @@ dependencies = [
"winapi",
]
[[package]]
name = "wpf-gpu-raster"
version = "0.1.0"
source = "git+https://github.com/FirefoxGraphics/wpf-gpu-raster?rev=f0d95ce14af8a8de74f469dbad715c4064fca2e1#f0d95ce14af8a8de74f469dbad715c4064fca2e1"
dependencies = [
"typed-arena-nomut",
]
[[package]]
name = "wr_malloc_size_of"
version = "0.0.2"

@@ -1293,6 +1293,11 @@ who = "Glenn Watson <git@intuitionlibrary.com>"
criteria = "safe-to-deploy"
version = "0.1.2"
[[audits.typed-arena-nomut]]
who = "Lee Salzman <lsalzman@gmail.com>"
criteria = "safe-to-deploy"
version = "0.1.0"
[[audits.uluru]]
who = "Emilio Cobos Álvarez <emilio@crisal.io>"
criteria = "safe-to-deploy"
@@ -1625,6 +1630,12 @@ also contains a small C file compiled at build-time. I audited all of it and it
looks correct.
"""
[[audits.wpf-gpu-raster]]
who = "Lee Salzman <lsalzman@mozilla.com>"
criteria = "safe-to-deploy"
version = "0.1.0"
notes = "Written and maintained by Gfx team at Mozilla."
[[audits.xmldecl]]
who = "Henri Sivonen <hsivonen@hsivonen.fi>"
criteria = "safe-to-deploy"

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"341c18fadacf5a5e634ed9225a14cf52d1acf3060bbef7cac7e06a9cc03d9d51","Cargo.toml":"59835e08c5df189f959b3ae85f3bb1d3ced3202ed83c01a84e1b216dc352cbc5","LICENSE":"9ed5e982274d54d0cf94f0e9f9fd889182b6f1f50a012f0be41ce7c884347ab6","README.md":"c01093b0ed283660525630cac3940c053e6fc9d16b2c2b96f5997060bbf518be","benches/benches.rs":"81a7aecb26801254b2c02583e0f48c1068f321c7988e4c76f914b26a4ee76ebf","ci/miri.sh":"10afdbeb3ed16e2eb2f330bc816652a3cd3fb9e49d6f5582e45014b59572b583","src/lib.rs":"6da96cda4f88099ff69018035795ffe74d84a8deed37a8dd5cf58ba7bcb90b2e","src/test.rs":"a4924cc873b077c14505d16a6b6ed8d4c248c21ebf5f76a2993d66b03ae9eae6"},"package":"bfc9d8d4e8c94375df96d6ac01a18c263d3d529bc4a53a207580ae9bc30e87c1"}

@@ -0,0 +1,143 @@
## Unreleased
Released YYYY/MM/DD.
### Added
* TODO (or remove section if none)
### Changed
* TODO (or remove section if none)
### Deprecated
* TODO (or remove section if none)
### Removed
* TODO (or remove section if none)
### Fixed
* TODO (or remove section if none)
### Security
* TODO (or remove section if none)
--------------------------------------------------------------------------------
## 2.0.1
Released 2019/01/10.
### Fixed
* Support `#![no_std]` on stable Rust.
--------------------------------------------------------------------------------
## 2.0.0
Released 2019/12/03.
### Fixed
* Fixed some intra-documentation URLs.
--------------------------------------------------------------------------------
## 2.0.0-rc1
Released 2019/11/26.
Unless any issues are discovered or raised, we will release version 2.0.0 soon.
### Added
* Added `alloc_str` to `Arena<u8>`, to be able to allocate string slices.
### Changed
* The minimum supported rust version is now 1.36.0.
* `alloc_uninitialized` returns `&mut [MaybeUninit<T>]` instead of `*mut [T]`,
which is less prone to undefined behavior.
--------------------------------------------------------------------------------
## 1.7.0
Released 2019/10/31. *Spooky!*
### Added
* Added a `len` method to count how many items are in an arena.
### Fixed
* Fixed some theoretical overflows.
--------------------------------------------------------------------------------
## 1.6.1
Released 2019/09/17.
### Fixed
* Now compiles on old stable Rust versions again, instead of just new stable
Rust versions. From here on out, we'll promise that 1.X will continue to
compile on rustc versions >= 1.32.
--------------------------------------------------------------------------------
## 1.6.0
Released 2019/09/09.
### Added
* Added the `Arena::iter_mut` method for mutably iterating over an arena's
contents. [See #29 for
details.](https://github.com/SimonSapin/rust-typed-arena/pull/29)
--------------------------------------------------------------------------------
## 1.5.0
Released 2019/08/02.
### Added
* `Arena` now implements `Default`
### Fixed
* Introduced an internal fast path for allocation, improving performance.
* Tests now run cleanly on Miri. There was previously a technicality where
the stacked borrow rules were not being followed.
--------------------------------------------------------------------------------
## 1.4.1
Released 2018/06/29.
### Added
* Added more documentation comments and examples.
--------------------------------------------------------------------------------
## 1.4.0
Released 2018/06/21.
### Added
* Added a new, on-by-default feature named "std". Disabling this feature allows
the crate to be used in `#![no_std]` environments. [#15][] [#12][]
[#15]: https://github.com/SimonSapin/rust-typed-arena/pull/15
[#12]: https://github.com/SimonSapin/rust-typed-arena/pull/12

@@ -0,0 +1,39 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
name = "typed-arena-nomut"
version = "0.1.0"
authors = ["Simon Sapin <simon.sapin@exyr.org>", "Nick Fitzgerald <fitzgen@gmail.com>"]
description = "The arena, a fast but limited type of allocator"
documentation = "https://docs.rs/typed-arena"
readme = "./README.md"
keywords = ["arena"]
categories = ["memory-management", "no-std"]
license = "MIT"
repository = "https://github.com/jrmuizel/typed-arena-nomut"
[profile.bench]
debug = true
[lib]
name = "typed_arena_nomut"
path = "src/lib.rs"
[[bench]]
name = "benches"
path = "benches/benches.rs"
harness = false
[dev-dependencies.criterion]
version = "0.3.4"
[features]
default = ["std"]
std = []

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2018 The typed-arena developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,74 @@
# `typed-arena-nomut`
[![](https://docs.rs/typed-arena-nomut/badge.svg)](https://docs.rs/typed-arena-nomut/)
[![](https://img.shields.io/crates/v/typed-arena-nomut.svg)](https://crates.io/crates/typed-arena-nomut)
[![](https://img.shields.io/crates/d/typed-arena-nomut.svg)](https://crates.io/crates/typed-arena-nomut)
This is a fork of the typed-arena crate that returns an immutable reference instead of
a mutable one. This allows iterating over the arena's items while they are borrowed.
**A fast (but limited) allocation arena for values of a single type.**
Allocated objects are destroyed all at once, when the arena itself is destroyed.
There is no deallocation of individual objects while the arena itself is still
alive. The flipside is that allocation is fast: typically just a vector push.
There is also a method `into_vec()` to recover ownership of allocated objects
when the arena is no longer required, instead of destroying everything.
## Example
```rust
use typed_arena_nomut::Arena;
struct Monster {
level: u32,
}
let monsters = Arena::new();
let goku = monsters.alloc(Monster { level: 9001 });
assert!(goku.level > 9000);
```
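Since `alloc` hands out shared references, you can keep them and still iterate over the arena. A minimal sketch of that pattern (mirroring the `iter` docs in `src/lib.rs`):
```rust
use std::cell::Cell;
use typed_arena_nomut::Arena;

let arena = Arena::new();
let first = arena.alloc(Cell::new(1));
arena.alloc(Cell::new(2));

// `iter` only needs `&Arena`, so `first` stays usable while we iterate.
for item in arena.iter() {
    item.set(item.get() * 10);
}

assert_eq!(first.get(), 10);
```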
## Safe Cycles
All allocated objects get the same lifetime, so you can safely create cycles
between them. This can be useful for certain data structures, such as graphs
and trees with parent pointers.
```rust
use std::cell::Cell;
use typed_arena_nomut::Arena;
struct CycleParticipant<'a> {
other: Cell<Option<&'a CycleParticipant<'a>>>,
}
let arena = Arena::new();
let a = arena.alloc(CycleParticipant { other: Cell::new(None) });
let b = arena.alloc(CycleParticipant { other: Cell::new(None) });
a.other.set(Some(b));
b.other.set(Some(a));
```
## Alternatives
### Need to allocate many different types of values?
Use multiple arenas if you have only a couple of different types, or try
[`bumpalo`](https://crates.io/crates/bumpalo), a bump-allocation arena that
can allocate heterogeneous types of values.
### Want allocation to return identifiers instead of references, so you can avoid dealing with references and lifetimes everywhere?
Check out [`id-arena`](https://crates.io/crates/id-arena) or
[`generational-arena`](https://crates.io/crates/generational-arena).
### Need to deallocate individual objects at a time?
Check out [`generational-arena`](https://crates.io/crates/generational-arena)
for an arena-style crate or look for a more traditional allocator.

@@ -0,0 +1,40 @@
#[macro_use]
extern crate criterion;
extern crate typed_arena_nomut;
use criterion::{Criterion, BenchmarkId};
#[derive(Default)]
struct Small(usize);
#[derive(Default)]
struct Big([usize; 32]);
fn allocate<T: Default>(n: usize) {
let arena = typed_arena_nomut::Arena::new();
for _ in 0..n {
let val: &T = arena.alloc(Default::default());
criterion::black_box(val);
}
}
fn criterion_benchmark(c: &mut Criterion) {
let mut group = c.benchmark_group("allocate");
for n in 1..5 {
let n = n * 1000;
group.throughput(criterion::Throughput::Elements(n as u64));
group.bench_with_input(
BenchmarkId::new("allocate-small", n),
&n,
|b, &n| b.iter(|| allocate::<Small>(n)),
);
group.bench_with_input(
BenchmarkId::new("allocate-big", n),
&n,
|b, &n| b.iter(|| allocate::<Big>(n)),
);
}
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);

@@ -0,0 +1,11 @@
set -ex
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
rustup default "$MIRI_NIGHTLY"
cargo clean
rustup component add miri
cargo miri setup
cargo miri test

@@ -0,0 +1,633 @@
//! The arena, a fast but limited type of allocator.
//!
//! **A fast (but limited) allocation arena for values of a single type.**
//!
//! Allocated objects are destroyed all at once, when the arena itself is
//! destroyed. There is no deallocation of individual objects while the arena
//! itself is still alive. The flipside is that allocation is fast: typically
//! just a vector push.
//!
//! There is also a method `into_vec()` to recover ownership of allocated
//! objects when the arena is no longer required, instead of destroying
//! everything.
//!
//! ## Example
//!
//! ```
//! use typed_arena_nomut::Arena;
//!
//! struct Monster {
//! level: u32,
//! }
//!
//! let monsters = Arena::new();
//!
//! let goku = monsters.alloc(Monster { level: 9001 });
//! assert!(goku.level > 9000);
//! ```
//!
//! ## Safe Cycles
//!
//! All allocated objects get the same lifetime, so you can safely create cycles
//! between them. This can be useful for certain data structures, such as graphs
//! and trees with parent pointers.
//!
//! ```
//! use std::cell::Cell;
//! use typed_arena_nomut::Arena;
//!
//! struct CycleParticipant<'a> {
//! other: Cell<Option<&'a CycleParticipant<'a>>>,
//! }
//!
//! let arena = Arena::new();
//!
//! let a = arena.alloc(CycleParticipant { other: Cell::new(None) });
//! let b = arena.alloc(CycleParticipant { other: Cell::new(None) });
//!
//! a.other.set(Some(b));
//! b.other.set(Some(a));
//! ```
// Potential optimizations:
// 1) add and stabilize a method for in-place reallocation of vecs.
// 2) add and stabilize placement new.
// 3) use an iterator. This may add far too much unsafe code.
#![deny(missing_docs)]
#![cfg_attr(not(any(feature = "std", test)), no_std)]
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(any(feature = "std", test))]
extern crate core;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::cmp;
use core::iter;
use core::mem;
use core::slice;
use core::str;
use std::cell::Ref;
use mem::MaybeUninit;
#[cfg(test)]
mod test;
// Initial size in bytes.
const INITIAL_SIZE: usize = 1024;
// Minimum capacity. Must be larger than 0.
const MIN_CAPACITY: usize = 1;
/// An arena of objects of type `T`.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// struct Monster {
/// level: u32,
/// }
///
/// let monsters = Arena::new();
///
/// let vegeta = monsters.alloc(Monster { level: 9001 });
/// assert!(vegeta.level > 9000);
/// ```
pub struct Arena<T> {
chunks: RefCell<ChunkList<T>>,
}
struct ChunkList<T> {
current: Vec<T>,
rest: Vec<Vec<T>>,
}
impl<T> Arena<T> {
/// Construct a new arena.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::new();
/// # arena.alloc(1);
/// ```
pub fn new() -> Arena<T> {
let size = cmp::max(1, mem::size_of::<T>());
Arena::with_capacity(INITIAL_SIZE / size)
}
/// Construct a new arena with capacity for `n` values pre-allocated.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::with_capacity(1337);
/// # arena.alloc(1);
/// ```
pub fn with_capacity(n: usize) -> Arena<T> {
let n = cmp::max(MIN_CAPACITY, n);
Arena {
chunks: RefCell::new(ChunkList {
current: Vec::with_capacity(n),
rest: Vec::new(),
}),
}
}
/// Return the number of values allocated in the arena.
///
/// This is useful for using the size of previous typed arenas to build new typed arenas with large enough capacity.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::with_capacity(0);
/// let a = arena.alloc(1);
/// let b = arena.alloc(2);
///
/// assert_eq!(arena.len(), 2);
/// ```
pub fn len(&self) -> usize {
let chunks = self.chunks.borrow();
let mut res = 0;
for vec in chunks.rest.iter() {
res += vec.len()
}
res + chunks.current.len()
}
/// Allocates a value in the arena, and returns a shared reference
/// to that value.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::new();
/// let x = arena.alloc(42);
/// assert_eq!(*x, 42);
/// ```
#[inline]
pub fn alloc(&self, value: T) -> &T {
self.alloc_fast_path(value)
.unwrap_or_else(|value| self.alloc_slow_path(value))
}
#[inline]
fn alloc_fast_path(&self, value: T) -> Result<&T, T> {
let mut chunks = self.chunks.borrow_mut();
let len = chunks.current.len();
if len < chunks.current.capacity() {
chunks.current.push(value);
// Avoid going through `Vec::deref_mut`, which overlaps
// other references we have already handed out!
debug_assert!(len < chunks.current.len()); // bounds check
Ok(unsafe { &mut *chunks.current.as_mut_ptr().add(len) })
} else {
Err(value)
}
}
fn alloc_slow_path(&self, value: T) -> &T {
&self.alloc_extend(iter::once(value))[0]
}
/// Uses the contents of an iterator to allocate values in the arena.
/// Returns a shared slice that contains these values.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::new();
/// let abc = arena.alloc_extend("abcdefg".chars().take(3));
/// assert_eq!(abc, ['a', 'b', 'c']);
/// ```
pub fn alloc_extend<I>(&self, iterable: I) -> &[T]
where
I: IntoIterator<Item = T>,
{
let mut iter = iterable.into_iter();
let mut chunks = self.chunks.borrow_mut();
let iter_min_len = iter.size_hint().0;
let mut next_item_index;
debug_assert!(
chunks.current.capacity() >= chunks.current.len(),
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
);
if iter_min_len > chunks.current.capacity() - chunks.current.len() {
chunks.reserve(iter_min_len);
chunks.current.extend(iter);
next_item_index = 0;
} else {
next_item_index = chunks.current.len();
let mut i = 0;
while let Some(elem) = iter.next() {
if chunks.current.len() == chunks.current.capacity() {
// The iterator was larger than we could fit into the current chunk.
let chunks = &mut *chunks;
// Create a new chunk into which we can freely push the entire iterator into
chunks.reserve(i + 1);
let previous_chunk = chunks.rest.last_mut().unwrap();
let previous_chunk_len = previous_chunk.len();
// Move any elements we put into the previous chunk into this new chunk
chunks
.current
.extend(previous_chunk.drain(previous_chunk_len - i..));
chunks.current.push(elem);
// And the remaining elements in the iterator
chunks.current.extend(iter);
next_item_index = 0;
break;
} else {
chunks.current.push(elem);
}
i += 1;
}
}
let new_slice_ref = &mut chunks.current[next_item_index..];
// Extend the lifetime from that of `chunks_borrow` to that of `self`.
// This is OK because we're careful to never move items
// by never pushing to inner `Vec`s beyond their initial capacity.
// The returned slice covers only the items pushed by this call,
// which the `Arena` has not yet given away any references to.
unsafe { mem::transmute::<&mut [T], &mut [T]>(new_slice_ref) }
}
/// Allocates space for a given number of values, but doesn't initialize it.
///
/// ## Safety
///
/// After calling this method, the arena considers the elements initialized. If you fail to
/// initialize them (which includes because of panicking during the initialization), the arena
/// will run destructors on the uninitialized memory. Therefore, you must initialize them.
///
/// Considering how easy it is to cause undefined behaviour using this, you're advised to
/// prefer the other (safe) methods, like [`alloc_extend`][Arena::alloc_extend].
///
/// ## Example
///
/// ```rust
/// use std::mem::{self, MaybeUninit};
/// use std::ptr;
/// use typed_arena_nomut::Arena;
///
/// // Transmute from MaybeUninit slice to slice of initialized T.
/// // It is a separate function to preserve the lifetime of the reference.
/// unsafe fn transmute_uninit<A>(r: &mut [MaybeUninit<A>]) -> &mut [A] {
/// mem::transmute(r)
/// }
///
/// let arena: Arena<bool> = Arena::new();
/// let slice: &mut [bool];
/// unsafe {
/// let uninitialized = arena.alloc_uninitialized(10);
/// for elem in uninitialized.iter_mut() {
/// ptr::write(elem.as_mut_ptr(), true);
/// }
/// slice = transmute_uninit(uninitialized);
/// }
/// ```
///
/// ## Alternative allocation pattern
///
/// To avoid the problem of dropping assumed to be initialized elements on panic, it is also
/// possible to combine the [`reserve_extend`][Arena::reserve_extend] with
/// [`uninitialized_array`][Arena::uninitialized_array], initialize the elements and confirm
/// them by this method. In such case, when there's a panic during initialization, the already
/// initialized elements would leak but it wouldn't cause UB.
///
/// ```rust
/// use std::mem::{self, MaybeUninit};
/// use std::ptr;
/// use typed_arena_nomut::Arena;
///
/// unsafe fn transmute_uninit<A>(r: &mut [MaybeUninit<A>]) -> &mut [A] {
/// mem::transmute(r)
/// }
///
/// const COUNT: usize = 2;
///
/// let arena: Arena<String> = Arena::new();
///
/// arena.reserve_extend(COUNT);
/// let slice: &mut [String];
/// unsafe {
/// // Perform initialization before we claim the memory.
/// let uninitialized = arena.uninitialized_array();
/// assert!((*uninitialized).len() >= COUNT); // Ensured by the reserve_extend
/// for elem in &mut (*uninitialized)[..COUNT] {
/// ptr::write(elem.as_mut_ptr(), "Hello".to_owned());
/// }
/// let addr = (*uninitialized).as_ptr() as usize;
///
/// // The alloc_uninitialized returns the same memory, but "confirms" its allocation.
/// slice = transmute_uninit(arena.alloc_uninitialized(COUNT));
/// assert_eq!(addr, slice.as_ptr() as usize);
/// assert_eq!(slice, &["Hello".to_owned(), "Hello".to_owned()]);
/// }
/// ```
pub unsafe fn alloc_uninitialized(&self, num: usize) -> &mut [MaybeUninit<T>] {
let mut chunks = self.chunks.borrow_mut();
debug_assert!(
chunks.current.capacity() >= chunks.current.len(),
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
);
if num > chunks.current.capacity() - chunks.current.len() {
chunks.reserve(num);
}
// At this point, the current chunk must have free capacity.
let next_item_index = chunks.current.len();
chunks.current.set_len(next_item_index + num);
// Go through pointers, to make sure we never create a reference to uninitialized T.
let start = chunks.current.as_mut_ptr().offset(next_item_index as isize);
let start_uninit = start as *mut MaybeUninit<T>;
slice::from_raw_parts_mut(start_uninit, num)
}
/// Makes sure there's enough continuous space for at least `num` elements.
///
/// This may save some work if called before [`alloc_extend`][Arena::alloc_extend]. It also
/// allows somewhat safer use pattern of [`alloc_uninitialized`][Arena::alloc_uninitialized].
/// On the other hand this might waste up to `n - 1` elements of space. In case new allocation
/// is needed, the unused ones in current chunk are never used.
pub fn reserve_extend(&self, num: usize) {
let mut chunks = self.chunks.borrow_mut();
debug_assert!(
chunks.current.capacity() >= chunks.current.len(),
"capacity is always greater than or equal to len, so we don't need to worry about underflow"
);
if num > chunks.current.capacity() - chunks.current.len() {
chunks.reserve(num);
}
}
/// Returns unused space.
///
/// *This unused space is still not considered "allocated".* Therefore, it
/// won't be dropped unless there are further calls to `alloc`,
/// [`alloc_uninitialized`][Arena::alloc_uninitialized], or
/// [`alloc_extend`][Arena::alloc_extend] which is why the method is safe.
///
/// It returns a raw pointer to avoid creating multiple mutable references to the same place.
/// It is up to the caller not to dereference it after any of the `alloc_` methods are called.
pub fn uninitialized_array(&self) -> *mut [MaybeUninit<T>] {
let mut chunks = self.chunks.borrow_mut();
let len = chunks.current.capacity() - chunks.current.len();
let next_item_index = chunks.current.len();
unsafe {
// Go through pointers, to make sure we never create a reference to uninitialized T.
let start = chunks.current.as_mut_ptr().offset(next_item_index as isize);
let start_uninit = start as *mut MaybeUninit<T>;
slice::from_raw_parts_mut(start_uninit, len) as *mut _
}
}
/// Convert this `Arena` into a `Vec<T>`.
///
/// Items in the resulting `Vec<T>` appear in the order that they were
/// allocated in.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena = Arena::new();
///
/// arena.alloc("a");
/// arena.alloc("b");
/// arena.alloc("c");
///
/// let easy_as_123 = arena.into_vec();
///
/// assert_eq!(easy_as_123, vec!["a", "b", "c"]);
/// ```
pub fn into_vec(self) -> Vec<T> {
let mut chunks = self.chunks.into_inner();
// keep order of allocation in the resulting Vec
let n = chunks
.rest
.iter()
.fold(chunks.current.len(), |a, v| a + v.len());
let mut result = Vec::with_capacity(n);
for mut vec in chunks.rest {
result.append(&mut vec);
}
result.append(&mut chunks.current);
result
}
/// Returns an iterator over shared references to the values in the arena.
///
/// Items are yielded in the order that they were allocated.
///
/// ## Example
///
/// ```
/// use typed_arena_nomut::Arena;
/// use std::cell::Cell;
///
/// #[derive(Debug, PartialEq, Eq)]
/// struct Point { x: Cell<i32>, y: i32 };
///
/// let mut arena = Arena::new();
///
/// arena.alloc(Point { x: Cell::new(0), y: 0 });
/// arena.alloc(Point { x: Cell::new(1), y: 1 });
///
/// for point in arena.iter() {
/// point.x.set(point.x.get() + 10);
/// }
///
/// let points = arena.into_vec();
///
/// assert_eq!(points, vec![Point { x: Cell::new(10), y: 0 }, Point { x: Cell::new(11), y: 1 }]);
///
/// ```
///
/// ## Immutable Iteration
///
/// Note that `iter` only takes `&self`, so the arena's contents can be read while
/// shared references to allocated values (like `x` below) are still live.
///
/// ```
/// use typed_arena_nomut::Arena;
/// use std::cell::Cell;
///
/// let mut arena = Arena::new();
/// let x = arena.alloc(Cell::new(1));
///
/// for i in arena.iter() {
/// println!("i: {}", i.get());
/// }
///
/// x.set(x.get() * 2);
/// ```
#[inline]
pub fn iter(&self) -> Iter<T> {
let chunks = self.chunks.borrow();
let position = if !chunks.rest.is_empty() {
let index = 0;
let inner_iter = chunks.rest[index].iter();
// Extend the lifetime of the individual elements to that of the arena.
// This is OK because we borrow the arena mutably to prevent new allocations
// and we take care here to never move items inside the arena while the
// iterator is alive.
let inner_iter = unsafe { mem::transmute(inner_iter) };
IterState::ChunkListRest { index, inner_iter }
} else {
// Extend the lifetime of the individual elements to that of the arena.
let iter = unsafe { mem::transmute(chunks.current.iter()) };
IterState::ChunkListCurrent { iter }
};
Iter {
chunks,
state: position,
}
}
}
impl Arena<u8> {
/// Allocates a string slice and returns a shared reference to it.
///
/// This is on `Arena<u8>`, because string slices use byte slices (`[u8]`) as their backing
/// storage.
///
/// # Example
///
/// ```
/// use typed_arena_nomut::Arena;
///
/// let arena: Arena<u8> = Arena::new();
/// let hello = arena.alloc_str("Hello world");
/// assert_eq!("Hello world", hello);
/// ```
#[inline]
pub fn alloc_str(&self, s: &str) -> &str {
let buffer = self.alloc_extend(s.bytes());
// Can't fail the utf8 validation, it already came in as utf8
unsafe { str::from_utf8_unchecked(buffer) }
}
}
impl<T> Default for Arena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T> ChunkList<T> {
#[inline(never)]
#[cold]
fn reserve(&mut self, additional: usize) {
let double_cap = self
.current
.capacity()
.checked_mul(2)
.expect("capacity overflow");
let required_cap = additional
.checked_next_power_of_two()
.expect("capacity overflow");
let new_capacity = cmp::max(double_cap, required_cap);
let chunk = mem::replace(&mut self.current, Vec::with_capacity(new_capacity));
self.rest.push(chunk);
}
}
enum IterState<'a, T> {
ChunkListRest {
index: usize,
inner_iter: slice::Iter<'a, T>,
},
ChunkListCurrent {
iter: slice::Iter<'a, T>,
},
}
/// Shared arena iterator.
///
/// This struct is created by the [`iter`](struct.Arena.html#method.iter) method on [Arenas](struct.Arena.html).
pub struct Iter<'a, T: 'a> {
chunks: Ref<'a, ChunkList<T>>,
state: IterState<'a, T>,
}
impl<'a, T> Iterator for Iter<'a, T> {
type Item = &'a T;
fn next(&mut self) -> Option<&'a T> {
loop {
self.state = match self.state {
IterState::ChunkListRest {
mut index,
ref mut inner_iter,
} => {
match inner_iter.next() {
Some(item) => return Some(item),
None => {
index += 1;
if index < self.chunks.rest.len() {
let inner_iter = self.chunks.rest[index].iter();
// Extend the lifetime of the individual elements to that of the arena.
let inner_iter = unsafe { mem::transmute(inner_iter) };
IterState::ChunkListRest { index, inner_iter }
} else {
let iter = self.chunks.current.iter();
// Extend the lifetime of the individual elements to that of the arena.
let iter = unsafe { mem::transmute(iter) };
IterState::ChunkListCurrent { iter }
}
}
}
}
IterState::ChunkListCurrent { ref mut iter } => return iter.next(),
};
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let current_len = self.chunks.current.len();
let current_cap = self.chunks.current.capacity();
if self.chunks.rest.is_empty() {
(current_len, Some(current_len))
} else {
let rest_len = self.chunks.rest.len();
let last_chunk_len = self
.chunks
.rest
.last()
.map(|chunk| chunk.len())
.unwrap_or(0);
let min = current_len + last_chunk_len;
let max = min + (rest_len * current_cap / rest_len);
(min, Some(max))
}
}
}

@@ -0,0 +1,373 @@
use super::*;
use std::cell::Cell;
use std::mem;
use std::panic::{self, AssertUnwindSafe};
use std::ptr;
struct DropTracker<'a>(&'a Cell<u32>);
impl<'a> Drop for DropTracker<'a> {
fn drop(&mut self) {
self.0.set(self.0.get() + 1);
}
}
struct Node<'a, 'b: 'a>(Option<&'a Node<'a, 'b>>, u32, DropTracker<'b>);
#[test]
fn arena_as_intended() {
let drop_counter = Cell::new(0);
{
let arena = Arena::with_capacity(2);
let mut node: &Node = arena.alloc(Node(None, 1, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 0);
node = arena.alloc(Node(Some(node), 2, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 0);
node = arena.alloc(Node(Some(node), 3, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 1);
node = arena.alloc(Node(Some(node), 4, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 1);
assert_eq!(node.1, 4);
assert_eq!(node.0.unwrap().1, 3);
assert_eq!(node.0.unwrap().0.unwrap().1, 2);
assert_eq!(node.0.unwrap().0.unwrap().0.unwrap().1, 1);
assert!(node.0.unwrap().0.unwrap().0.unwrap().0.is_none());
assert_eq!(arena.len(), 4);
mem::drop(node);
assert_eq!(drop_counter.get(), 0);
let mut node: &Node = arena.alloc(Node(None, 5, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 1);
node = arena.alloc(Node(Some(node), 6, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 1);
node = arena.alloc(Node(Some(node), 7, DropTracker(&drop_counter)));
assert_eq!(arena.chunks.borrow().rest.len(), 2);
assert_eq!(drop_counter.get(), 0);
assert_eq!(node.1, 7);
assert_eq!(node.0.unwrap().1, 6);
assert_eq!(node.0.unwrap().0.unwrap().1, 5);
assert!(node.0.unwrap().0.unwrap().0.is_none());
assert_eq!(drop_counter.get(), 0);
}
assert_eq!(drop_counter.get(), 7);
}
#[test]
fn ensure_into_vec_maintains_order_of_allocation() {
let arena = Arena::with_capacity(1); // force multiple inner vecs
for &s in &["t", "e", "s", "t"] {
arena.alloc(String::from(s));
}
let vec = arena.into_vec();
assert_eq!(vec, vec!["t", "e", "s", "t"]);
}
#[test]
fn test_zero_cap() {
let arena = Arena::with_capacity(0);
let a = arena.alloc(1);
let b = arena.alloc(2);
assert_eq!(*a, 1);
assert_eq!(*b, 2);
assert_eq!(arena.len(), 2);
}
#[test]
fn test_alloc_extend() {
let arena = Arena::with_capacity(2);
for i in 0..15 {
let slice = arena.alloc_extend(0..i);
for (j, &elem) in slice.iter().enumerate() {
assert_eq!(j, elem);
}
}
}
#[test]
fn test_alloc_uninitialized() {
const LIMIT: usize = 15;
let drop_counter = Cell::new(0);
unsafe {
let arena: Arena<Node> = Arena::with_capacity(4);
for i in 0..LIMIT {
let slice = arena.alloc_uninitialized(i);
for (j, elem) in slice.iter_mut().enumerate() {
ptr::write(elem.as_mut_ptr(), Node(None, j as u32, DropTracker(&drop_counter)));
}
assert_eq!(drop_counter.get(), 0);
}
}
assert_eq!(drop_counter.get(), (0..LIMIT).fold(0, |a, e| a + e) as u32);
}
#[test]
fn test_alloc_extend_with_drop_counter() {
let drop_counter = Cell::new(0);
{
let arena = Arena::with_capacity(2);
let iter = (0..100).map(|j| Node(None, j as u32, DropTracker(&drop_counter)));
let older_ref = Some(&arena.alloc_extend(iter)[0]);
assert_eq!(drop_counter.get(), 0);
let iter = (0..100).map(|j| Node(older_ref, j as u32, DropTracker(&drop_counter)));
arena.alloc_extend(iter);
assert_eq!(drop_counter.get(), 0);
}
assert_eq!(drop_counter.get(), 200);
}
/// Test with bools.
///
/// Bools, unlike integers, have invalid bit patterns. Therefore, ever having an uninitialized bool
/// is insta-UB. Make sure miri doesn't find any such thing.
#[test]
fn test_alloc_uninitialized_bools() {
const LEN: usize = 20;
unsafe {
let arena: Arena<bool> = Arena::with_capacity(2);
let slice = arena.alloc_uninitialized(LEN);
for elem in slice.iter_mut() {
ptr::write(elem.as_mut_ptr(), true);
}
// Now it is fully initialized, we can safely transmute the slice.
let slice: &mut [bool] = mem::transmute(slice);
assert_eq!(&[true; LEN], slice);
}
}
/// Check nothing bad happens by panicking during initialization of borrowed slice.
#[test]
fn alloc_uninitialized_with_panic() {
struct Dropper(bool);
impl Drop for Dropper {
fn drop(&mut self) {
// Just make sure we touch the value, to make sure miri would bite if it was
// uninitialized
if self.0 {
panic!();
}
}
}
let mut reached_first_init = false;
panic::catch_unwind(AssertUnwindSafe(|| unsafe {
let arena: Arena<Dropper> = Arena::new();
arena.reserve_extend(2);
let uninitialized = arena.uninitialized_array();
assert!((*uninitialized).len() >= 2);
ptr::write((*uninitialized)[0].as_mut_ptr(), Dropper(false));
reached_first_init = true;
panic!("To drop the arena");
// If it didn't panic, we would continue by initializing the second one and confirming by
// .alloc_uninitialized();
})).unwrap_err();
assert!(reached_first_init);
}
#[test]
fn test_uninitialized_array() {
let arena = Arena::with_capacity(2);
let uninit = arena.uninitialized_array();
arena.alloc_extend(0..2);
unsafe {
for (&a, b) in (&*uninit).iter().zip(0..2) {
assert_eq!(a.assume_init(), b);
}
assert!((&*arena.uninitialized_array()).as_ptr() != (&*uninit).as_ptr());
arena.alloc(0);
let uninit = arena.uninitialized_array();
assert_eq!((&*uninit).len(), 3);
}
}
#[test]
fn dont_trust_the_iterator_size() {
use std::iter::repeat;
struct WrongSizeIter<I>(I);
impl<I> Iterator for WrongSizeIter<I>
where
I: Iterator,
{
type Item = I::Item;
fn next(&mut self) -> Option<Self::Item> {
self.0.next()
}
fn size_hint(&self) -> (usize, Option<usize>) {
(0, Some(0))
}
}
impl<I> ExactSizeIterator for WrongSizeIter<I> where I: Iterator {}
let arena = Arena::with_capacity(2);
arena.alloc(0);
let slice = arena.alloc_extend(WrongSizeIter(repeat(1).take(1_000)));
// Allocation of 1000 elements should have created a new chunk
assert_eq!(arena.chunks.borrow().rest.len(), 1);
assert_eq!(slice.len(), 1000);
}
#[test]
fn arena_is_send() {
fn assert_is_send<T: Send>(_: T) {}
// If `T` is `Send`, ...
assert_is_send(42_u32);
// Then `Arena<T>` is also `Send`.
let arena: Arena<u32> = Arena::new();
assert_is_send(arena);
}
#[test]
fn iter_mut_low_capacity() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 1_000;
const CAP: usize = 16;
let arena = Arena::with_capacity(CAP);
for i in 1..MAX {
arena.alloc(NonCopy(i));
}
assert!(
arena.chunks.borrow().rest.len() > 1,
"expected multiple chunks"
);
let mut iter = arena.iter();
for i in 1..MAX {
assert_eq!(Some(&NonCopy(i)), iter.next());
}
assert_eq!(None, iter.next());
}
#[test]
fn iter_mut_high_capacity() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 1_000;
const CAP: usize = 8192;
let arena = Arena::with_capacity(CAP);
for i in 1..MAX {
arena.alloc(NonCopy(i));
}
assert!(
arena.chunks.borrow().rest.is_empty(),
"expected single chunk"
);
let mut iter = arena.iter();
for i in 1..MAX {
assert_eq!(Some(&NonCopy(i)), iter.next());
}
assert_eq!(None, iter.next());
}
fn assert_size_hint<T>(arena_len: usize, iter: Iter<'_, T>) {
let (min, max) = iter.size_hint();
assert!(max.is_some());
let max = max.unwrap();
// Check that the actual arena length lies between the estimated min and max
assert!(min <= arena_len);
assert!(max >= arena_len);
// Check that the min and max estimates are within a factor of 3
assert!(min >= arena_len / 3);
assert!(max <= arena_len * 3);
}
#[test]
fn size_hint() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 32;
const CAP: usize = 0;
for cap in CAP..(CAP + 16/* check some non-power-of-two capacities */) {
let arena = Arena::with_capacity(cap);
for i in 1..MAX {
arena.alloc(NonCopy(i));
let iter = arena.iter();
assert_size_hint(i, iter);
}
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn size_hint_low_initial_capacities() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 25_000;
const CAP: usize = 0;
for cap in CAP..(CAP + 128/* check some non-power-of-two capacities */) {
let arena = Arena::with_capacity(cap);
for i in 1..MAX {
arena.alloc(NonCopy(i));
let iter = arena.iter();
assert_size_hint(i, iter);
}
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn size_hint_high_initial_capacities() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 25_000;
const CAP: usize = 8164;
for cap in CAP..(CAP + 128/* check some non-power-of-two capacities */) {
let arena = Arena::with_capacity(cap);
for i in 1..MAX {
arena.alloc(NonCopy(i));
let iter = arena.iter();
assert_size_hint(i, iter);
}
}
}
#[test]
#[cfg_attr(miri, ignore)]
fn size_hint_many_items() {
#[derive(Debug, PartialEq, Eq)]
struct NonCopy(usize);
const MAX: usize = 5_000_000;
const CAP: usize = 16;
let arena = Arena::with_capacity(CAP);
for i in 1..MAX {
arena.alloc(NonCopy(i));
let iter = arena.iter();
assert_size_hint(i, iter);
}
}

@@ -0,0 +1 @@
{"files":{".github/workflows/coverage.yml":"90aaa068c16cb778b24badaff78baf2a313637780a723be09596abde0f4c827a",".github/workflows/rust.yml":"905954be896d052ced621eedb9d5b9d35795490f27071ac1147e75ac3b3711ec","CHANGES.md":"5f54e553a1c4ef21c5be6109b25df9d1d63c4547627723fe044c73dbddf0db2f","Cargo.toml":"c4f220ebc481f7b1db1909f32c5e95a94f665b40943713f084547d9df2f8c29c","LICENSE":"ae48df11a335dc1a615f4f938b69cba73bcf4485c4f97af49b38efb0f216353b","README.md":"45f9e20ee7a50ca4b4b55918976b3218667d63ebc3075952f8b0ea1d6a6d22f8","examples/draw.rs":"52fee9e2f2c11e1c891b30cb460be2a0ec65974f38dc0c08fd48391caf1e4247","examples/obj-output.rs":"6fc549022aa715eee74ea1cafb89ca33189e9dbe914ea6b2c46160049bda68f3","examples/simple.rs":"99fb566414cbd4a0eb69a2774c9780d7cd17e5cdaa14837b280fba319c053f22","notes":"48e636c646d697e213b3a79e31063e11b6ffc7493592d31f3929b1db495870b8","src/aacoverage.rs":"1f9ebe8db75bd80b6b347e3734b3c5bdb35c6fa984e142271bfcc0c286e0cb45","src/aarasterizer.rs":"d02939c0de5ad1c134543e0e91e89f3d86f6ff718c52a6f40df4db5fb1c4a714","src/bezier.rs":"f089ab04e30077ce4e0fe59dfa602948b989aa53d51ad207fbc30c1edd24086b","src/c_bindings.rs":"e3eadbdb83a0ef3d7a364b4607d1e0469cf97b823d69b4eee578ffec59980315","src/fix.rs":"7ccf63db5bab4ab0135d92691f7c2272a27866b9792dd55ec98b2d1c1b7c0358","src/geometry_sink.rs":"9025569f77f475a1e47fd470e8f53dcdf88ef57e3a5b8a51268fff892da8b1a7","src/helpers.rs":"220294dac335943518f249c4a27ad803f8226ed62cd780f517e95be6343a1f2f","src/hwrasterizer.rs":"55d7771b0f2537bb0ba2cce4006786582759bca4958cb8008822aa4947ac5404","src/hwvertexbuffer.rs":"51f884dda5f91e30a70fb6486673967b216dee295521539d1a4806fcdbf4cf94","src/lib.rs":"bc496e7d4e6827198997a5c5a099000405ac590daece785ca94b4a31e2351c55","src/matrix.rs":"1ac44bc5d073f96ab64b1b5c6077fd0d47fe61db8243bd9a55fc91d8eae1dd92","src/notes":"d50d49e0b5660bc6350d8055f25f26700c937558de0af690e1fc4f50ed7e05c9","src/nullable_ref.rs":"789fe0e59b7d4a925faecbf2362be93643ea8382b4424ca0e60866f9bf83c3cd","src/real.rs":"73a2d1a77613364e9514fd7ead4d708a554d2b7343645cdb4cb8a2b3b640e057","src/tri_rasterize.rs":"30821a3465cea3c5ac578590013b530c03ea3010225f580d6cf609e39910c412","src/types.rs":"43a20f23a798858c6be64c1faf42ee9e392cbab323f851653993fcb0d78cdb5e"},"package":null}

@@ -0,0 +1,23 @@
name: Coverage
on: [pull_request, push]
jobs:
coverage:
runs-on: ubuntu-latest
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v3
- name: Install Rust
run: rustup toolchain install stable --component llvm-tools-preview
- name: Install cargo-llvm-cov
uses: taiki-e/install-action@cargo-llvm-cov
- name: Generate code coverage
run: cargo llvm-cov --all-features --workspace --lcov --output-path lcov.info
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
token: ${{ secrets.CODECOV_TOKEN }} # not required for public repos
files: lcov.info
fail_ci_if_error: true

@@ -0,0 +1,39 @@
name: Rust
on:
push:
pull_request:
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Build
run: cargo build --verbose
- name: Run tests
run: cargo test --verbose
aarch64:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Install toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
target: aarch64-unknown-linux-gnu
- name: Install cross
run: cargo install cross
- name: Run tests with Neon
run: cross test --target aarch64-unknown-linux-gnu

@@ -0,0 +1,22 @@
Changes for Safety
------------------
`CEdgeStore` is replaced by `typed_arena_nomut::Arena<CEdge>`.
`CEdgeStore` is an arena with built-in stack storage for the first allocation
of the arena. It exposes the allocated buffers to support very fast allocation,
and supports fast enumeration by returning pointers to each allocation.
`CCoverageBuffer` also now uses a `typed_arena_nomut::Arena`, which it uses to
allocate `CCoverageIntervalBuffer`s. We currently lack support for the built-in
stack storage. Storing these in an `Arena` is not ideal; we would rather just
heap-allocate them individually.
Changes for performance
-----------------------
Switched from using triangle strips to triangle lists. This lets
us use a single triangle to draw each line segment, which reduces
the amount of geometry per line segment from 6 vertices to 3.
Direct2D also made this switch in later versions.
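For illustration, the allocate-then-enumerate pattern the arena provides looks roughly like the sketch below; `Edge` here is a simplified stand-in, not the real `CEdge` record:
```rust
use typed_arena_nomut::Arena;

// Simplified stand-in for the real CEdge record.
struct Edge {
    x: i32,
    end_y: i32,
}

fn main() {
    let edges: Arena<Edge> = Arena::new();

    // Allocation is typically just a Vec push into the current chunk.
    let first = edges.alloc(Edge { x: 0, end_y: 8 });
    edges.alloc(Edge { x: 4, end_y: 8 });

    // Enumeration only needs a shared borrow, so `first` stays usable here.
    let total: i32 = edges.iter().map(|e| e.end_y).sum();
    assert_eq!(total, 16);
    assert_eq!(first.x, 0);
}
```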

@@ -0,0 +1,21 @@
[package]
name = "wpf-gpu-raster"
version = "0.1.0"
edition = "2021"
license = "MIT"
[profile.release]
debug = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
typed-arena-nomut = "0.1.0"
[dev-dependencies]
usvg = "0.4"
euclid = "0.22.6"
png = "0.17.2"
[features]
default = ["c_bindings"]
c_bindings = []

third_party/rust/wpf-gpu-raster/LICENSE
@@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) .NET Foundation and Contributors
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,20 @@
This is a port of the WPF hardware rasterizer code to Rust. That
rasterizer is the predecessor to the Direct2D rasterizer. Direct2D still
uses a similar technique when run on hardware that does not support
Target Independent Rasterization.
Design
======
Bezier flattening uses forward differencing of the error metric to compute
a flattened version that matches a traditional adaptive recursive flattening.
The general algorithm used for rasterization is a vertical sweep of
the shape that maintains an active edge list. The sweep is done
at a sub-scanline resolution and results in either:
1. Sub-scanlines being combined in the coverage buffer and output
as "complex scans". These are emitted as lines constructed out
of triangle strips.
2. Simple trapezoids being recognized in the active edge list
and output using a faster simple trapezoid path.
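Example
=======
A minimal usage sketch, based on the `examples/simple.rs` and `examples/obj-output.rs` files vendored with this crate:
```rust
use wpf_gpu_raster::PathBuilder;

fn main() {
    let mut p = PathBuilder::new();
    p.move_to(10., 10.);
    p.line_to(10., 30.);
    p.line_to(30., 30.);
    p.line_to(30., 10.);
    p.close();
    // The four integer arguments give the output area
    // (0, 0, 100, 100 here, as in the vendored examples).
    let vertices = p.rasterize_to_tri_list(0, 0, 100, 100);
    for v in &vertices {
        println!("({}, {}) coverage={}", v.x, v.y, v.coverage);
    }
}
```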

@@ -0,0 +1,354 @@
/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
/**************************************************************************
*
* Copyright 2012 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
*/
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use euclid::{default::Transform2D, point2};
use wpf_gpu_raster::{PathBuilder};
use std::ops::Index;
const WIDTH: u32 = 800;
const HEIGHT: u32 = 800;
fn over(src: u32, dst: u32) -> u32 {
let a = src >> 24;
let a = 255 - a;
let mask = 0xff00ff;
let t = (dst & mask) * a + 0x800080;
let mut rb = (t + ((t >> 8) & mask)) >> 8;
rb &= mask;
rb += src & mask;
// saturate
rb |= 0x1000100 - ((rb >> 8) & mask);
rb &= mask;
let t = ((dst >> 8) & mask) * a + 0x800080;
let mut ag = (t + ((t >> 8) & mask)) >> 8;
ag &= mask;
ag += (src >> 8) & mask;
// saturate
ag |= 0x1000100 - ((ag >> 8) & mask);
ag &= mask;
(ag << 8) + rb
}
pub fn alpha_mul(x: u32, a: u32) -> u32 {
let mask = 0xFF00FF;
let src_rb = ((x & mask) * a) >> 8;
let src_ag = ((x >> 8) & mask) * a;
(src_rb & mask) | (src_ag & !mask)
}
fn write_image(data: &[u32], path: &str) {
use std::path::Path;
use std::fs::File;
use std::io::BufWriter;
let mut png_data: Vec<u8> = vec![0; (WIDTH * HEIGHT * 3) as usize];
let mut i = 0;
for pixel in data {
png_data[i] = ((pixel >> 16) & 0xff) as u8;
png_data[i + 1] = ((pixel >> 8) & 0xff) as u8;
png_data[i + 2] = ((pixel >> 0) & 0xff) as u8;
i += 3;
}
let path = Path::new(path);
let file = File::create(path).unwrap();
let w = &mut BufWriter::new(file);
let mut encoder = png::Encoder::new(w, WIDTH, HEIGHT);
encoder.set_color(png::ColorType::Rgb);
encoder.set_depth(png::BitDepth::Eight);
let mut writer = encoder.write_header().unwrap();
writer.write_image_data(&png_data).unwrap(); // Save
}
#[derive(Debug)]
struct Vertex {
x: f32,
y: f32,
coverage: f32
}
#[derive(Debug)]
struct Triangle {
v: [Vertex; 3],
}
impl Index<usize> for Triangle {
type Output = Vertex;
fn index(&self, index: usize) -> &Self::Output {
&self.v[index]
}
}
// D3D11 mandates 8 bit subpixel precision:
// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
const FIXED_SHIFT: i32 = 8;
const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
/* Proper rounding of float to integer */
fn iround(mut v: f32) -> i64 {
if v > 0.0 {
v += 0.5;
}
if v < 0.0 {
v -= 0.5;
}
return v as i64
}
/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
fn rast_triangle(buffer: &mut [u32], stride: usize, tri: &Triangle, color: u32) {
let center_offset = -0.5;
let mut coverage1 = tri[0].coverage;
let mut coverage2 = tri[1].coverage;
let mut coverage3 = tri[2].coverage;
/* fixed point coordinates */
let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
/* Force correct vertex order */
let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
if cross > 0 {
std::mem::swap(&mut x1, &mut x3);
std::mem::swap(&mut y1, &mut y3);
// I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
std::mem::swap(&mut coverage2, &mut coverage3);
} else {
std::mem::swap(&mut coverage1, &mut coverage3);
}
/* Deltas */
let dx12 = x1 - x2;
let dx23 = x2 - x3;
let dx31 = x3 - x1;
let dy12 = y1 - y2;
let dy23 = y2 - y3;
let dy31 = y3 - y1;
/* Fixed-point deltas */
let fdx12 = dx12 << FIXED_SHIFT;
let fdx23 = dx23 << FIXED_SHIFT;
let fdx31 = dx31 << FIXED_SHIFT;
let fdy12 = dy12 << FIXED_SHIFT;
let fdy23 = dy23 << FIXED_SHIFT;
let fdy31 = dy31 << FIXED_SHIFT;
/* Bounding rectangle */
let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
minx = minx.max(0);
maxx = maxx.min(WIDTH as i64 - 1);
miny = miny.max(0);
maxy = maxy.min(HEIGHT as i64 - 1);
/* Half-edge constants */
let mut c1 = dy12 * x1 - dx12 * y1;
let mut c2 = dy23 * x2 - dx23 * y2;
let mut c3 = dy31 * x3 - dx31 * y3;
/* Correct for top-left filling convention */
if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
/* Perform rasterization */
let mut buffer = &mut buffer[miny as usize * stride..];
for _y in miny..=maxy {
let mut cx1 = cy1;
let mut cx2 = cy2;
let mut cx3 = cy3;
for x in minx..=maxx {
if cx1 > 0 && cx2 > 0 && cx3 > 0 {
// cross is equal to 2*area of the triangle.
// we can normalize cx by 2*area to get barycentric coords.
let area = cross.abs() as f32;
let bary = (cx1 as f32 / area, cx2 as f32/ area, cx3 as f32 / area);
let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
let color = alpha_mul(color, (coverages * 256. + 0.5) as u32);
buffer[x as usize] = over(color, buffer[x as usize]);
}
cx1 -= fdy12;
cx2 -= fdy23;
cx3 -= fdy31;
}
cy1 += fdx12;
cy2 += fdx23;
cy3 += fdx31;
buffer = &mut buffer[stride..];
}
}
fn main() {
let opt = usvg::Options::default();
let rtree = usvg::Tree::from_file("tiger.svg", &opt).unwrap();
let mut image = vec![0; (WIDTH * HEIGHT) as usize];
for _ in 0..1 {
let mut total_vertex_count = 0;
let mut total_time = std::time::Duration::default();
for node in rtree.root().descendants() {
use usvg::NodeExt;
let t = node.transform();
let transform = Transform2D::new(
t.a as f32, t.b as f32,
t.c as f32, t.d as f32,
t.e as f32, t.f as f32,
);
let s = 1.;
if let usvg::NodeKind::Path(ref usvg_path) = *node.borrow() {
let color = match usvg_path.fill {
Some(ref fill) => {
match fill.paint {
usvg::Paint::Color(c) => 0xff000000 | (c.red as u32) << 16 | (c.green as u32) << 8 | c.blue as u32,
_ => 0xff00ff00,
}
}
None => {
continue;
}
};
let mut builder = PathBuilder::new();
//dbg!(&usvg_path.segments);
for segment in &usvg_path.segments {
match *segment {
usvg::PathSegment::MoveTo { x, y } => {
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
builder.move_to(p.x, p.y);
}
usvg::PathSegment::LineTo { x, y } => {
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
builder.line_to(p.x, p.y);
}
usvg::PathSegment::CurveTo { x1, y1, x2, y2, x, y, } => {
let c1 = transform.transform_point(point2(x1 as f32, y1 as f32)) * s;
let c2 = transform.transform_point(point2(x2 as f32, y2 as f32)) * s;
let p = transform.transform_point(point2(x as f32, y as f32)) * s;
builder.curve_to(
c1.x, c1.y,
c2.x, c2.y,
p.x, p.y,
);
}
usvg::PathSegment::ClosePath => {
builder.close();
}
}
}
let start = std::time::Instant::now();
let result = builder.rasterize_to_tri_list(0, 0, WIDTH as i32, HEIGHT as i32);
let end = std::time::Instant::now();
total_time += end - start;
println!("vertices {}", result.len());
total_vertex_count += result.len();
if result.len() == 0 {
continue;
}
for n in (0..result.len()).step_by(3) {
let vertices = {
[&result[n], &result[n+1], &result[n+2]]
};
let src = color;
let tri = Triangle { v: [
Vertex { x: vertices[0].x, y: vertices[0].y, coverage: vertices[0].coverage},
Vertex { x: vertices[1].x, y: vertices[1].y, coverage: vertices[1].coverage},
Vertex { x: vertices[2].x, y: vertices[2].y, coverage: vertices[2].coverage}
]
};
rast_triangle(&mut image, WIDTH as usize, &tri, src);
}
}
}
println!("total vertex count {}, took {}ms", total_vertex_count, total_time.as_secs_f32()*1000.);
}
write_image(&image, "out.png");
use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
use crate::*;
fn calculate_hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
assert_eq!(calculate_hash(&image),
if cfg!(debug_assertions) { 0x5973c52a1c0232f3 } else { 0xf15821a5bebc5ecf});
}

@@ -0,0 +1,26 @@
// Output an .obj file of the generated mesh. Viewable at https://3dviewer.net/
fn output_obj_file(data: &[OutputVertex]) {
for v in data {
let color = v.coverage;
println!("v {} {} {} {} {} {}", v.x, v.y, 0., color, color, color);
}
// output a triangle-list face list (.obj indices are 1-based)
for n in (1..data.len()-1).step_by(3) {
println!("f {} {} {}", n, n+1, n+2);
}
}
use wpf_gpu_raster::{PathBuilder, OutputVertex};
fn main() {
let mut p = PathBuilder::new();
p.move_to(10., 10.0);
p.line_to(30., 10.);
p.line_to(50., 20.);
p.line_to(30., 30.);
p.line_to(10., 30.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
output_obj_file(&result)
}

@@ -0,0 +1,11 @@
use wpf_gpu_raster::PathBuilder;
fn main() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(10., 30.);
p.line_to(30., 30.);
p.line_to(30., 10.);
p.close();
let _result = p.rasterize_to_tri_list(0, 0, 100, 100);
//dbg!(result);
}

third_party/rust/wpf-gpu-raster/notes
@@ -0,0 +1,8 @@
bezier flattening
-----------------
If we make sure we flatten beziers to integer y values, we can avoid having to hit
the slow complex coverage path.
We can probably do this by using a skia style flattener.
Normally we compute a series of line segments using partial differencing.
I think we can adjust the line towards an integer y value by having small partial differences that we can move by.
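As a rough illustration of the plain partial-differencing step (not the error-metric-driven, integer-y-snapping version these notes are after), a uniform forward-difference flattener for a cubic bezier might look like:
```rust
// Flatten a cubic bezier by stepping a fixed number of uniform t steps
// with forward differences; the step count `n` is chosen by the caller.
fn flatten_cubic(
    p0: (f32, f32),
    p1: (f32, f32),
    p2: (f32, f32),
    p3: (f32, f32),
    n: u32,
) -> Vec<(f32, f32)> {
    let n = n.max(1);
    let h = 1.0 / n as f32;

    // Power-basis coefficients for one axis plus their first, second and
    // third forward differences at step size h.
    let fd = |v0: f32, v1: f32, v2: f32, v3: f32| {
        let a = -v0 + 3.0 * v1 - 3.0 * v2 + v3;
        let b = 3.0 * v0 - 6.0 * v1 + 3.0 * v2;
        let c = -3.0 * v0 + 3.0 * v1;
        (
            v0,                                    // value at t = 0
            a * h * h * h + b * h * h + c * h,     // first difference
            6.0 * a * h * h * h + 2.0 * b * h * h, // second difference
            6.0 * a * h * h * h,                   // third difference
        )
    };

    let (mut x, mut dx1, mut dx2, dx3) = fd(p0.0, p1.0, p2.0, p3.0);
    let (mut y, mut dy1, mut dy2, dy3) = fd(p0.1, p1.1, p2.1, p3.1);

    let mut points = vec![(x, y)];
    for _ in 0..n {
        x += dx1; dx1 += dx2; dx2 += dx3;
        y += dy1; dy1 += dy2; dy2 += dy3;
        points.push((x, y));
    }
    points
}
```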

@@ -0,0 +1,629 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//------------------------------------------------------------------------------
//
use std::cell::Cell;
use typed_arena_nomut::Arena;
//
// Description:
// Coverage buffer implementation
#[cfg(debug_assertions)]
use crate::aarasterizer::AssertActiveList;
use crate::aarasterizer::CEdge;
use crate::nullable_ref::Ref;
use crate::types::*;
//struct CEdge;
//struct CInactiveEdge;
//-------------------------------------------------------------------------
//
// TrapezoidalAA only supports 8x8 mode, so the shifts/masks are all
// constants. Also, since we must be symmetrical, x and y shifts are
// merged into one shift unlike the implementation in aarasterizer.
//
//-------------------------------------------------------------------------
pub const c_nShift: INT = 3;
pub const c_nShiftSize: INT = 8;
pub const c_nShiftSizeSquared: INT = c_nShiftSize * c_nShiftSize;
pub const c_nHalfShiftSize: INT = 4;
pub const c_nShiftMask: INT = 7;
//pub const c_rShiftSize: f32 = 8.0;
//pub const c_rHalfShiftSize: f32 = 4.0;
pub const c_rInvShiftSize: f32 = 1.0/8.0;
pub const c_antiAliasMode: MilAntiAliasMode = MilAntiAliasMode::EightByEight;
//
// Interval coverage descriptor for our antialiased filler
//
pub struct CCoverageInterval<'a>
{
pub m_pNext: Cell<Ref<'a, CCoverageInterval<'a>>>, // m_pNext interval (look for sentinel, not NULL)
pub m_nPixelX: Cell<INT>, // Interval's left edge (m_pNext->X is the right edge)
pub m_nCoverage: Cell<INT>, // Pixel coverage for interval
}
impl<'a> Default for CCoverageInterval<'a> {
fn default() -> Self {
Self { m_pNext: Cell::new(unsafe { Ref::null() } ), m_nPixelX: Default::default(), m_nCoverage: Default::default() }
}
}
// Define our on-stack storage use. The 'free' versions are nicely tuned
// to avoid allocations in most common scenarios, while at the same time
// not chewing up toooo much stack space.
//
// We make the debug versions small so that we hit the 'grow' cases more
// frequently, for better testing:
#[cfg(debug_assertions)]
// Must be at least 6 now: 4 for the "minus4" logic in hwrasterizer.*, and then
// 1 each for the head and tail sentinels (since their allocation doesn't use Grow).
const INTERVAL_BUFFER_NUMBER: usize = 8;
#[cfg(not(debug_assertions))]
const INTERVAL_BUFFER_NUMBER: usize = 32;
//
// Allocator structure for the antialiased fill interval data
//
struct CCoverageIntervalBuffer<'a>
{
m_pNext: Cell<Option<& 'a CCoverageIntervalBuffer<'a>>>,
m_interval: [CCoverageInterval<'a>; INTERVAL_BUFFER_NUMBER],
}
impl<'a> Default for CCoverageIntervalBuffer<'a> {
fn default() -> Self {
Self { m_pNext: Cell::new(None), m_interval: Default::default() }
}
}
//------------------------------------------------------------------------------
//
// Class: CCoverageBuffer
//
// Description:
// Coverage buffer implementation that maintains coverage information
// for one scanline.
//
// This implementation will maintain a linked list of intervals consisting
// of x value in pixel space and a coverage value that applies for all pixels
// between pInterval->X and pInterval->Next->X.
//
// For example, if we add the following interval (assuming 8x8 anti-aliasing)
// to the coverage buffer:
// _____ _____ _____ _____
// | | | | |
// | ------------------- |
// |_____|_____|_____|_____|
// (0,0) (1,0) (2,0) (3,0) (4,0)
//
// Then we will get the following coverage buffer:
//
// m_nPixelX: INT_MIN | 0 | 1 | 3 | 4 | INT_MAX
// m_nCoverage: 0 | 4 | 8 | 4 | 0 | 0xdeadbeef
// m_pNext: -------->|---->|---->|---->|---->| NULL
//
//------------------------------------------------------------------------------
pub struct CCoverageBuffer<'a>
{
/*
public:
//
// Init/Destroy methods
//
VOID Initialize();
VOID Destroy();
//
// Setup the buffer so that it can accept another scanline
//
VOID Reset();
//
// Add a subpixel interval to the coverage buffer
//
HRESULT FillEdgesAlternating(
__in_ecount(1) const CEdge *pEdgeActiveList,
INT nSubpixelYCurrent
);
HRESULT FillEdgesWinding(
__in_ecount(1) const CEdge *pEdgeActiveList,
INT nSubpixelYCurrent
);
HRESULT AddInterval(INT nSubpixelXLeft, INT nSubpixelXRight);
private:
HRESULT Grow(
__deref_out_ecount(1) CCoverageInterval **ppIntervalNew,
__deref_out_ecount(1) CCoverageInterval **ppIntervalEndMinus4
);
public:*/
pub m_pIntervalStart: Cell<Ref<'a, CCoverageInterval<'a>>>, // Points to list head entry
//private:
m_pIntervalNew: Cell<Ref<'a, CCoverageInterval<'a>>>,
interval_new_index: Cell<usize>,
// The Minus4 in the below variable refers to the position at which
// we need to Grow the buffer. The buffer is grown once before an
// AddInterval, so the Grow has to ensure that there are enough
// intervals for the AddInterval worst case which is the following:
//
// 1 2 3 4
// *_____*_____ _____*_____*
// | | | | |
// | ---|-----------|--- |
// |_____|_____|_____|_____|
//
    // Note that the *'s above mark potential insert points in the list,
// so we need to ensure that at least 4 intervals can be allocated.
//
m_pIntervalEndMinus4: Cell<Ref<'a, CCoverageInterval<'a>>>,
m_pIntervalBufferBuiltin: CCoverageIntervalBuffer<'a>,
m_pIntervalBufferCurrent: Cell<Ref<'a, CCoverageIntervalBuffer<'a>>>,
arena: Arena<CCoverageIntervalBuffer<'a>>
// Disable instrumentation checks within all methods of this class
//SET_MILINSTRUMENTATION_FLAGS(MILINSTRUMENTATIONFLAGS_DONOTHING);
}
impl<'a> Default for CCoverageBuffer<'a> {
fn default() -> Self {
Self {
m_pIntervalStart: Cell::new(unsafe { Ref::null() }),
m_pIntervalNew: Cell::new(unsafe { Ref::null() }),
m_pIntervalEndMinus4: Cell::new(unsafe { Ref::null() }),
m_pIntervalBufferBuiltin: Default::default(),
m_pIntervalBufferCurrent: unsafe { Cell::new(Ref::null()) },
arena: Arena::new(),
interval_new_index: Cell::new(0),
}
}
}
//
// Inlines
//
impl<'a> CCoverageBuffer<'a> {
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::AddInterval
//
// Synopsis: Add a subpixel resolution interval to the coverage buffer
//
//-------------------------------------------------------------------------
pub fn AddInterval(&'a self, nSubpixelXLeft: INT, nSubpixelXRight: INT) -> HRESULT
{
let hr: HRESULT = S_OK;
let mut nPixelXNext: INT;
let nPixelXLeft: INT;
let nPixelXRight: INT;
let nCoverageLeft: INT; // coverage from right edge of pixel for interval start
let nCoverageRight: INT; // coverage from left edge of pixel for interval end
let mut pInterval = self.m_pIntervalStart.get();
let mut pIntervalNew = self.m_pIntervalNew.get();
let mut interval_new_index = self.interval_new_index.get();
let mut pIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();
// Make sure we have enough room to add two intervals if
// necessary:
if (pIntervalNew >= pIntervalEndMinus4)
{
IFC!(self.Grow(&mut pIntervalNew, &mut pIntervalEndMinus4, &mut interval_new_index));
}
// Convert interval to pixel space so that we can insert it
// into the coverage buffer
debug_assert!(nSubpixelXLeft < nSubpixelXRight);
nPixelXLeft = nSubpixelXLeft >> c_nShift;
nPixelXRight = nSubpixelXRight >> c_nShift;
// Skip any intervals less than 'nPixelLeft':
loop {
nPixelXNext = pInterval.m_pNext.get().m_nPixelX.get();
if !(nPixelXNext < nPixelXLeft) { break }
pInterval = pInterval.m_pNext.get();
}
// Insert a new interval if necessary:
if (nPixelXNext != nPixelXLeft)
{
pIntervalNew.m_nPixelX.set(nPixelXLeft);
pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
pInterval.m_pNext.set(pIntervalNew);
pInterval = pIntervalNew;
interval_new_index += 1;
pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
}
else
{
pInterval = (*pInterval).m_pNext.get();
}
//
// Compute coverage for left segment as shown by the *'s below
//
// |_____|_____|_____|_
// | | | |
// | ***---------- |
// |_____|_____|_____|
//
nCoverageLeft = c_nShiftSize - (nSubpixelXLeft & c_nShiftMask);
// If nCoverageLeft == 0, then the value of nPixelXLeft is wrong
// and should have been equal to nPixelXLeft+1.
debug_assert!(nCoverageLeft > 0);
// If we have partial coverage, then ensure that we have a position
// for the end of the pixel
if ((nCoverageLeft < c_nShiftSize || (nPixelXLeft == nPixelXRight))
&& nPixelXLeft + 1 != pInterval.m_pNext.get().m_nPixelX.get())
{
pIntervalNew.m_nPixelX.set(nPixelXLeft + 1);
pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
pInterval.m_pNext.set(pIntervalNew);
interval_new_index += 1;
pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
}
//
// If the interval only includes one pixel, then the coverage is
// nSubpixelXRight - nSubpixelXLeft
//
if (nPixelXLeft == nPixelXRight)
{
pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nSubpixelXRight - nSubpixelXLeft);
debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
//goto Cleanup;
//Cleanup:
// Update the coverage buffer new interval
self.interval_new_index.set(interval_new_index);
self.m_pIntervalNew.set(pIntervalNew);
return hr;
}
// Update coverage of current interval
pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + nCoverageLeft);
debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
// Increase the coverage for any intervals between 'nPixelXLeft'
// and 'nPixelXRight':
loop {
(nPixelXNext = pInterval.m_pNext.get().m_nPixelX.get());
if !(nPixelXNext < nPixelXRight) {
break;
}
pInterval = pInterval.m_pNext.get();
pInterval.m_nCoverage.set(pInterval.m_nCoverage.get() + c_nShiftSize);
debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
}
// Insert another new interval if necessary:
if (nPixelXNext != nPixelXRight)
{
pIntervalNew.m_nPixelX.set(nPixelXRight);
pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get() - c_nShiftSize);
pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
pInterval.m_pNext.set(pIntervalNew);
pInterval = pIntervalNew;
interval_new_index += 1;
pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
}
else
{
pInterval = pInterval.m_pNext.get();
}
//
// Compute coverage for right segment as shown by the *'s below
//
// |_____|_____|_____|_
// | | | |
// | ---------**** |
// |_____|_____|_____|
//
nCoverageRight = nSubpixelXRight & c_nShiftMask;
if (nCoverageRight > 0)
{
if (nPixelXRight + 1 != (*(*pInterval).m_pNext.get()).m_nPixelX.get())
{
pIntervalNew.m_nPixelX.set(nPixelXRight + 1);
pIntervalNew.m_nCoverage.set(pInterval.m_nCoverage.get());
pIntervalNew.m_pNext.set(pInterval.m_pNext.get());
pInterval.m_pNext.set(pIntervalNew);
interval_new_index += 1;
pIntervalNew = Ref::new(&Ref::get_ref(self.m_pIntervalBufferCurrent.get()).m_interval[interval_new_index])
}
pInterval.m_nCoverage.set((*pInterval).m_nCoverage.get() + nCoverageRight);
debug_assert!(pInterval.m_nCoverage.get() <= c_nShiftSize*c_nShiftSize);
}
//Cleanup:
// Update the coverage buffer new interval
self.interval_new_index.set(interval_new_index);
self.m_pIntervalNew.set(pIntervalNew);
return hr;
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::FillEdgesAlternating
//
// Synopsis:
// Given the active edge list for the current scan, do an alternate-mode
// antialiased fill.
//
//-------------------------------------------------------------------------
pub fn FillEdgesAlternating(&'a self,
pEdgeActiveList: Ref<CEdge>,
nSubpixelYCurrent: INT
) -> HRESULT
{
let hr: HRESULT = S_OK;
let mut pEdgeStart: Ref<CEdge> = (*pEdgeActiveList).Next.get();
let mut pEdgeEnd: Ref<CEdge>;
let mut nSubpixelXLeft: INT;
let mut nSubpixelXRight: INT;
ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);
while (pEdgeStart.X.get() != INT::MAX)
{
pEdgeEnd = pEdgeStart.Next.get();
// We skip empty pairs:
(nSubpixelXLeft = pEdgeStart.X.get());
if (nSubpixelXLeft != pEdgeEnd.X.get())
{
// We now know we have a non-empty interval. Skip any
// empty interior pairs:
while ({(nSubpixelXRight = pEdgeEnd.X.get()); pEdgeEnd.X == pEdgeEnd.Next.get().X})
{
pEdgeEnd = pEdgeEnd.Next.get().Next.get();
}
debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));
IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
}
// Prepare for the next iteration:
pEdgeStart = pEdgeEnd.Next.get();
}
//Cleanup:
return hr
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::FillEdgesWinding
//
// Synopsis:
    //      Given the active edge list for the current scan, do a winding-mode
// antialiased fill.
//
//-------------------------------------------------------------------------
pub fn FillEdgesWinding(&'a self,
pEdgeActiveList: Ref<CEdge>,
nSubpixelYCurrent: INT
) -> HRESULT
{
let hr: HRESULT = S_OK;
let mut pEdgeStart: Ref<CEdge> = pEdgeActiveList.Next.get();
let mut pEdgeEnd: Ref<CEdge>;
let mut nSubpixelXLeft: INT;
let mut nSubpixelXRight: INT;
let mut nWindingValue: INT;
ASSERTACTIVELIST!(pEdgeActiveList, nSubpixelYCurrent);
while (pEdgeStart.X.get() != INT::MAX)
{
pEdgeEnd = pEdgeStart.Next.get();
nWindingValue = pEdgeStart.WindingDirection;
while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
{
pEdgeEnd = pEdgeEnd.Next.get();
}
debug_assert!(pEdgeEnd.X.get() != INT::MAX);
// We skip empty pairs:
if ({nSubpixelXLeft = pEdgeStart.X.get(); nSubpixelXLeft != pEdgeEnd.X.get()})
{
// We now know we have a non-empty interval. Skip any
// empty interior pairs:
while ({nSubpixelXRight = pEdgeEnd.X.get(); nSubpixelXRight == pEdgeEnd.Next.get().X.get()})
{
pEdgeStart = pEdgeEnd.Next.get();
pEdgeEnd = pEdgeStart.Next.get();
nWindingValue = pEdgeStart.WindingDirection;
while ({nWindingValue += pEdgeEnd.WindingDirection; nWindingValue != 0})
{
pEdgeEnd = pEdgeEnd.Next.get();
}
}
debug_assert!((nSubpixelXLeft < nSubpixelXRight) && (nSubpixelXRight < INT::MAX));
IFC!(self.AddInterval(nSubpixelXLeft, nSubpixelXRight));
}
// Prepare for the next iteration:
pEdgeStart = pEdgeEnd.Next.get();
}
//Cleanup:
return hr;//RRETURN(hr);
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::Initialize
//
// Synopsis: Set the coverage buffer to a valid initial state
//
//-------------------------------------------------------------------------
pub fn Initialize(&'a self)
{
self.m_pIntervalBufferBuiltin.m_interval[0].m_nPixelX.set(INT::MIN);
self.m_pIntervalBufferBuiltin.m_interval[0].m_nCoverage.set(0);
self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
self.m_pIntervalBufferBuiltin.m_interval[1].m_nPixelX.set(INT::MAX);
self.m_pIntervalBufferBuiltin.m_interval[1].m_nCoverage.set(0xdeadbeef);
self.m_pIntervalBufferBuiltin.m_interval[1].m_pNext.set(unsafe { Ref::null() });
self.m_pIntervalBufferBuiltin.m_pNext.set(None);
self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));
self.m_pIntervalStart.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[0]));
self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
self.interval_new_index.set(2);
self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::Destroy
//
// Synopsis: Free all allocated buffers
//
//-------------------------------------------------------------------------
pub fn Destroy(&mut self)
{
// Free the linked-list of allocations (skipping 'm_pIntervalBufferBuiltin',
// which is built into the class):
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::Reset
//
// Synopsis: Reset the coverage buffer
//
//-------------------------------------------------------------------------
pub fn Reset(&'a self)
{
// Reset our coverage structure. Point the head back to the tail,
// and reset where the next new entry will be placed:
self.m_pIntervalBufferBuiltin.m_interval[0].m_pNext.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[1]));
self.m_pIntervalBufferCurrent.set(Ref::new(&self.m_pIntervalBufferBuiltin));
self.m_pIntervalNew.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[2]));
self.interval_new_index.set(2);
self.m_pIntervalEndMinus4.set(Ref::new(&self.m_pIntervalBufferBuiltin.m_interval[INTERVAL_BUFFER_NUMBER - 4]));
}
//-------------------------------------------------------------------------
//
// Function: CCoverageBuffer::Grow
//
// Synopsis:
// Grow our interval buffer.
//
//-------------------------------------------------------------------------
fn Grow(&'a self,
ppIntervalNew: &mut Ref<'a, CCoverageInterval<'a>>,
ppIntervalEndMinus4: &mut Ref<'a, CCoverageInterval<'a>>,
interval_new_index: &mut usize
) -> HRESULT
{
let hr: HRESULT = S_OK;
let pIntervalBufferNew = (*self.m_pIntervalBufferCurrent.get()).m_pNext.get();
let pIntervalBufferNew = pIntervalBufferNew.unwrap_or_else(||
{
let pIntervalBufferNew = self.arena.alloc(Default::default());
(*pIntervalBufferNew).m_pNext.set(None);
(*self.m_pIntervalBufferCurrent.get()).m_pNext.set(Some(pIntervalBufferNew));
pIntervalBufferNew
});
self.m_pIntervalBufferCurrent.set(Ref::new(pIntervalBufferNew));
self.m_pIntervalNew.set(Ref::new(&(*pIntervalBufferNew).m_interval[2]));
self.interval_new_index.set(2);
self.m_pIntervalEndMinus4.set(Ref::new(&(*pIntervalBufferNew).m_interval[INTERVAL_BUFFER_NUMBER - 4]));
*ppIntervalNew = self.m_pIntervalNew.get();
*ppIntervalEndMinus4 = self.m_pIntervalEndMinus4.get();
*interval_new_index = 2;
return hr;
}
}
/*
impl<'a> Drop for CCoverageBuffer<'a> {
fn drop(&mut self) {
self.Destroy();
}
}*/

File diff suppressed because it is too large


@ -0,0 +1,990 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//+-----------------------------------------------------------------------------
//
// class Bezier32
//
// Bezier cracker.
//
// A hybrid cubic Bezier curve flattener based on KirkO's error factor.
// Generates line segments fast without using the stack. Used to flatten a
// path.
//
// For an understanding of the methods used, see:
//
// Kirk Olynyk, "..."
// Goossen and Olynyk, "System and Method of Hybrid Forward
// Differencing to Render Bezier Splines"
// Lien, Shantz and Vaughan Pratt, "Adaptive Forward Differencing for
// Rendering Curves and Surfaces", Computer Graphics, July 1987
// Chang and Shantz, "Rendering Trimmed NURBS with Adaptive Forward
// Differencing", Computer Graphics, August 1988
// Foley and Van Dam, "Fundamentals of Interactive Computer Graphics"
//
// Public Interface:
// bInit(pptfx) - pptfx points to 4 control points of
// Bezier. Current point is set to the first
// point after the start-point.
// Bezier32(pptfx) - Constructor with initialization.
// vGetCurrent(pptfx) - Returns current polyline point.
// bCurrentIsEndPoint() - TRUE if current point is end-point.
// vNext() - Moves to next polyline point.
//
#![allow(unused_parens)]
#![allow(non_upper_case_globals)]
//+-----------------------------------------------------------------------------
//
//
// $TAG ENGR
// $Module: win_mil_graphics_geometry
// $Keywords:
//
// $Description:
// Class for flattening a bezier.
//
// $ENDTAG
//
//------------------------------------------------------------------------------
// First conversion from original 28.4 to 18.14 format
const HFD32_INITIAL_SHIFT: i32 = 10;
// Second conversion to 15.17 format
const HFD32_ADDITIONAL_SHIFT: i32 = 3;
// BEZIER_FLATTEN_GDI_COMPATIBLE:
//
// Don't turn on this switch without testing carefully. It's more for
// documentation's sake - to show the values that GDI used - for an error
// tolerance of 2/3.
// It turns out that 2/3 produces very noticeable artifacts on antialiased lines,
// so we want to use 1/4 instead.
/*
#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE
// Flatten to an error of 2/3. During initial phase, use 18.14 format.
#define TEST_MAGNITUDE_INITIAL (6 * 0x00002aa0L)
// Error of 2/3. During normal phase, use 15.17 format.
#define TEST_MAGNITUDE_NORMAL (TEST_MAGNITUDE_INITIAL << 3)
#else
*/
use crate::types::*;
/*
// Flatten to an error of 1/4. During initial phase, use 18.14 format.
const TEST_MAGNITUDE_INITIAL: i32 = (6 * 0x00001000);
// Error of 1/4. During normal phase, use 15.17 format.
const TEST_MAGNITUDE_NORMAL: i32 = (TEST_MAGNITUDE_INITIAL << 3);
*/
// I have modified the constants for HFD32 as part of fixing accuracy errors
// (Bug 816015). Something similar could be done for the 64 bit hfd, but it ain't
// broke so I'd rather not fix it.
// The shift to the steady state 15.17 format
const HFD32_SHIFT: LONG = HFD32_INITIAL_SHIFT + HFD32_ADDITIONAL_SHIFT;
// Added to output numbers before rounding back to original representation
const HFD32_ROUND: LONG = 1 << (HFD32_SHIFT - 1);
// The error is tested on max(|e2|, |e3|), which represent 6 times the actual error.
// The flattening tolerance is hard coded to 1/4 in the original geometry space,
// which translates to 4 in 28.4 format. So 6 times that is:
const HFD32_TOLERANCE: LONGLONG = 24;
// During the initial phase, while working in 18.14 format
const HFD32_INITIAL_TEST_MAGNITUDE: LONGLONG = HFD32_TOLERANCE << HFD32_INITIAL_SHIFT;
// During the steady state, while working in 15.17 format
const HFD32_TEST_MAGNITUDE: LONGLONG = HFD32_INITIAL_TEST_MAGNITUDE << HFD32_ADDITIONAL_SHIFT;
// We will stop halving the segment with basis e1, e2, e3, e4 when max(|e2|, |e3|)
// is less than HFD32_TOLERANCE. The operation e2 = (e2 + e3) >> 3 in vHalveStepSize() may
// eat up 3 bits of accuracy. HfdBasis32 starts off with a pad of HFD32_SHIFT zeros, so
// we can stay exact up to HFD32_SHIFT/3 subdivisions. Since every subdivision is guaranteed
// to shift max(|e2|, |e3|) at least by 2, we will subdivide no more than n times if the
// initial max(|e2|, |e3|) is less than HFD32_TOLERANCE << 2n. But if the initial
// max(|e2|, |e3|) is greater than HFD32_TOLERANCE >> (HFD32_SHIFT / 3) then we may not be
// able to flatten with the 32 bit hfd, so we need to resort to the 64 bit hfd.
const HFD32_MAX_ERROR: INT = (HFD32_TOLERANCE as i32) << ((2 * HFD32_INITIAL_SHIFT) / 3);
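// (With HFD32_INITIAL_SHIFT = 10 this works out to 24 << 6 = 1536.)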
// The maximum size of coefficients that can be handled by HfdBasis32.
const HFD32_MAX_SIZE: LONGLONG = 0xffffc000;
// Michka 9/12/03: I found this number in the body of the code without any explanation.
// My analysis suggests that we could get away with larger numbers, but if I'm wrong we
// could be in big trouble, so let us stay conservative.
//
// In bInit() we subtract Min(Bezier coeffients) from the original coefficients, so after
// that 0 <= coefficients <= Bound, and the test will be Bound < HFD32_MAX_SIZE. When
// switching to the HFD basis in bInit():
// * e0 is the first Bezier coeffient, so abs(e0) <= Bound.
// * e1 is a difference of non-negative coefficients so abs(e1) <= Bound.
// * e2 and e3 can be written as 12*(p - (q + r)/2) where p,q and r are coefficients.
// 0 <=(q + r)/2 <= Bound, so abs(p - (q + r)/2) <= 2*Bound, hence
// abs(e2), abs(e3) <= 12*Bound.
//
// During vLazyHalveStepSize we add e2 + e3, resulting in absolute value <= 24*Bound.
// Initially HfdBasis32 shifts the numbers by HFD32_INITIAL_SHIFT, so we need to handle
// 24*bounds*(2^HFD32_SHIFT), and that needs to be less than 2^31. So the bounds need to
// be less than 2^(31-HFD32_INITIAL_SHIFT)/24).
//
// For speed, the algorithm uses & rather than < for comparison. To facilitate that we
// replace 24 by 32=2^5, and then the binary representation of the number is of the form
// 0...010...0 with HFD32_SHIFT+5 trailing zeros. By subtracting that from 2^32 = 0xffffffff+1
// we get a number that is 1..110...0 with the same number of trailing zeros, and that can be
// used with an & for comparison. So the number should be:
//
// 0xffffffffL - (1L << (31 - HFD32_INITIAL_SHIFT - 5)) + 1 = (1L << 16) + 1 = 0xffff0000
//
// For the current values of HFD32_INITIAL_SHIFT=10 and HFD32_ADDITIONAL_SHIFT=3, the steady
// state doesn't pose additional requirements, as shown below.
//
// For some reason the current code uses 0xfffc0000 = (1L << 14) + 1.
//
// Here is why the steady state doesn't pose additional requirements:
//
// In vSteadyState we multiply e0 and e1 by 8, so the requirement is Bounds*2^13 < 2^31,
// or Bounds < 2^18, less stringent than the above.
//
// In vLazyHalveStepSize we cut the error down by subdivision, making abs(e2) and abs(e3)
// less than HFD32_TEST_MAGNITUDE = 24*2^13, well below 2^31.
//
// During all the steady-state operations - vTakeStep, vHalveStepSize and vDoubleStepSize,
// e0 is on the curve and e1 is a difference of 2 points on the curve, so
// abs(e0), abs(e1) < Bounds * 2^13, which requires Bound < 2^(31-13) = 2^18. e2 and e3
// are errors, kept below 6*HFD32_TEST_MAGNITUDE = 216*2^13. Details:
//
// In vTakeStep e2 = 2e2 - e3 keeps abs(e2) < 3*HFD32_TEST_MAGNITUDE = 72*2^13,
// well below 2^31
//
// In vHalveStepSize we add e2 + e3 when their absolute is < 3*HFD32_TEST_MAGNITUDE (because
// this comes after a step), so that keeps the result below 6*HFD32_TEST_MAGNITUDE = 216*2^13.
//
// In vDoubleStepSize we know that abs(e2), abs(e3) < HFD32_TEST_MAGNITUDE/4, otherwise we
// would not have doubled the step.
#[derive(Default)]
struct HfdBasis32
{
e0: LONG,
e1: LONG,
e2: LONG,
e3: LONG,
}
impl HfdBasis32 {
fn lParentErrorDividedBy4(&self) -> LONG {
self.e3.abs().max((self.e2 + self.e2 - self.e3).abs())
}
fn lError(&self) -> LONG
{
self.e2.abs().max(self.e3.abs())
}
fn fxValue(&self) -> INT
{
return((self.e0 + HFD32_ROUND) >> HFD32_SHIFT);
}
fn bInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT) -> bool
{
// Change basis and convert from 28.4 to 18.14 format:
self.e0 = (p1 ) << HFD32_INITIAL_SHIFT;
self.e1 = (p4 - p1 ) << HFD32_INITIAL_SHIFT;
self.e2 = 6 * (p2 - p3 - p3 + p4);
self.e3 = 6 * (p1 - p2 - p2 + p3);
if (self.lError() >= HFD32_MAX_ERROR)
{
// Large error, will require too many subdivision for this 32 bit hfd
return false;
}
self.e2 <<= HFD32_INITIAL_SHIFT;
self.e3 <<= HFD32_INITIAL_SHIFT;
return true;
}
fn vLazyHalveStepSize(&mut self, cShift: LONG)
{
self.e2 = self.ExactShiftRight(self.e2 + self.e3, 1);
self.e1 = self.ExactShiftRight(self.e1 - self.ExactShiftRight(self.e2, cShift), 1);
}
fn vSteadyState(&mut self, cShift: LONG)
{
// We now convert from 18.14 fixed format to 15.17:
self.e0 <<= HFD32_ADDITIONAL_SHIFT;
self.e1 <<= HFD32_ADDITIONAL_SHIFT;
let mut lShift = cShift - HFD32_ADDITIONAL_SHIFT;
if (lShift < 0)
{
lShift = -lShift;
self.e2 <<= lShift;
self.e3 <<= lShift;
}
else
{
self.e2 >>= lShift;
self.e3 >>= lShift;
}
}
fn vHalveStepSize(&mut self)
{
self.e2 = self.ExactShiftRight(self.e2 + self.e3, 3);
self.e1 = self.ExactShiftRight(self.e1 - self.e2, 1);
self.e3 = self.ExactShiftRight(self.e3, 2);
}
fn vDoubleStepSize(&mut self)
{
self.e1 += self.e1 + self.e2;
self.e3 <<= 2;
self.e2 = (self.e2 << 3) - self.e3;
}
fn vTakeStep(&mut self)
{
self.e0 += self.e1;
let lTemp = self.e2;
self.e1 += lTemp;
self.e2 += lTemp - self.e3;
self.e3 = lTemp;
}
fn ExactShiftRight(&self, num: i32, shift: i32) -> i32
{
// Performs a shift to the right while asserting that we're not
// losing significant bits
assert!(num == (num >> shift) << shift);
return num >> shift;
}
}
fn vBoundBox(
aptfx: &[POINT; 4]) -> RECT
{
let mut left = aptfx[0].x;
let mut right = aptfx[0].x;
let mut top = aptfx[0].y;
let mut bottom = aptfx[0].y;
for i in 1..4
{
left = left.min(aptfx[i].x);
top = top.min(aptfx[i].y);
right = right.max(aptfx[i].x);
bottom = bottom.max(aptfx[i].y);
}
// We make the bounds one pixel loose for the nominal width
// stroke case, which increases the bounds by half a pixel
// in every dimension:
RECT { left: left - 16, top: top - 16, right: right + 16, bottom: bottom + 16}
}
fn bIntersect(
a: &RECT,
b: &RECT) -> bool
{
return((a.left < b.right) &&
(a.top < b.bottom) &&
(a.right > b.left) &&
(a.bottom > b.top));
}
#[derive(Default)]
pub struct Bezier32
{
cSteps: LONG,
x: HfdBasis32,
y: HfdBasis32,
rcfxBound: RECT
}
impl Bezier32 {
fn bInit(&mut self,
aptfxBez: &[POINT; 4],
// Pointer to 4 control points
prcfxClip: Option<&RECT>) -> bool
// Bound box of visible region (optional)
{
let mut aptfx;
let mut cShift = 0; // Keeps track of 'lazy' shifts
self.cSteps = 1; // Number of steps to do before reach end of curve
self.rcfxBound = vBoundBox(aptfxBez);
aptfx = aptfxBez.clone();
{
let mut fxOr;
let mut fxOffset;
// find out if the coordinates minus the bounding box
// exceed 10 bits
fxOffset = self.rcfxBound.left;
fxOr = {aptfx[0].x -= fxOffset; aptfx[0].x};
fxOr |= {aptfx[1].x -= fxOffset; aptfx[1].x};
fxOr |= {aptfx[2].x -= fxOffset; aptfx[2].x};
fxOr |= {aptfx[3].x -= fxOffset; aptfx[3].x};
fxOffset = self.rcfxBound.top;
fxOr |= {aptfx[0].y -= fxOffset; aptfx[0].y};
fxOr |= {aptfx[1].y -= fxOffset; aptfx[1].y};
fxOr |= {aptfx[2].y -= fxOffset; aptfx[2].y};
fxOr |= {aptfx[3].y -= fxOffset; aptfx[3].y};
// This 32 bit cracker can only handle points in a 10 bit space:
if ((fxOr as i64 & HFD32_MAX_SIZE) != 0) {
return false;
}
}
if (!self.x.bInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x))
{
return false;
}
if (!self.y.bInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y))
{
return false;
}
if (match prcfxClip { None => true, Some(clip) => bIntersect(&self.rcfxBound, clip)})
{
loop {
let lTestMagnitude = (HFD32_INITIAL_TEST_MAGNITUDE << cShift) as LONG;
if (self.x.lError() <= lTestMagnitude && self.y.lError() <= lTestMagnitude) {
break;
}
cShift += 2;
self.x.vLazyHalveStepSize(cShift);
self.y.vLazyHalveStepSize(cShift);
self.cSteps <<= 1;
}
}
self.x.vSteadyState(cShift);
self.y.vSteadyState(cShift);
// Note that this handles the case where the initial error for
// the Bezier is already less than HFD32_TEST_MAGNITUDE:
self.x.vTakeStep();
self.y.vTakeStep();
self.cSteps-=1;
return true;
}
fn cFlatten(&mut self,
mut pptfx: &mut [POINT],
pbMore: &mut bool) -> i32
{
let mut cptfx = pptfx.len();
assert!(cptfx > 0);
let cptfxOriginal = cptfx;
while {
// Return current point:
pptfx[0].x = self.x.fxValue() + self.rcfxBound.left;
pptfx[0].y = self.y.fxValue() + self.rcfxBound.top;
pptfx = &mut pptfx[1..];
// If cSteps == 0, that was the end point in the curve!
if (self.cSteps == 0)
{
*pbMore = false;
// '+1' because we haven't decremented 'cptfx' yet:
return(cptfxOriginal - cptfx + 1) as i32;
}
// Okay, we have to step:
if (self.x.lError().max(self.y.lError()) > HFD32_TEST_MAGNITUDE as LONG)
{
self.x.vHalveStepSize();
self.y.vHalveStepSize();
self.cSteps <<= 1;
}
// We are here after vTakeStep. Before that the error max(|e2|,|e3|) was less
// than HFD32_TEST_MAGNITUDE. vTakeStep changed e2 to 2e2-e3. Since
// |2e2-e3| < max(|e2|,|e3|) << 2 and vHalveStepSize is guaranteed to reduce
// max(|e2|,|e3|) by >> 2, no more than one subdivision should be required to
// bring the new max(|e2|,|e3|) back to within HFD32_TEST_MAGNITUDE, so:
assert!(self.x.lError().max(self.y.lError()) <= HFD32_TEST_MAGNITUDE as LONG);
while (!(self.cSteps & 1 != 0) &&
self.x.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2) &&
self.y.lParentErrorDividedBy4() <= (HFD32_TEST_MAGNITUDE as LONG >> 2))
{
self.x.vDoubleStepSize();
self.y.vDoubleStepSize();
self.cSteps >>= 1;
}
self.cSteps -=1 ;
self.x.vTakeStep();
self.y.vTakeStep();
cptfx -= 1;
cptfx != 0
} {}
*pbMore = true;
return cptfxOriginal as i32;
}
}
///////////////////////////////////////////////////////////////////////////
// Bezier64
//
// All math is done using 64 bit fixed numbers in a 36.28 format.
//
// All drawing is done in a 31 bit space, then a 31 bit window offset
// is applied. In the initial transform where we change to the HFD
// basis, e2 and e3 require the most bits precision: e2 = 6(p2 - 2p3 + p4).
// This requires an additional 4 bits precision -- hence we require 36 bits
// for the integer part, and the remaining 28 bits is given to the fraction.
//
// In rendering a Bezier, every 'subdivide' requires an extra 3 bits of
// fractional precision. In order to be reversible, we can allow no
// error to creep in. Since a INT coordinate is 32 bits, and we
// require an additional 4 bits as mentioned above, that leaves us
// 28 bits fractional precision -- meaning we can do a maximum of
// 9 subdivides. Now, the maximum absolute error of a Bezier curve in 27
// bit integer space is 2^29 - 1. But 9 subdivides reduces the error by a
// guaranteed factor of 2^18, meaning we can subdivide down only to an error
// of 2^11 before we overflow, when in fact we want to reduce error to less
// than 1.
//
// So what we do is HFD until we hit an error less than 2^11, reverse our
// basis transform to get the four control points of this smaller curve
// (rounding in the process to 32 bits), then invoke another copy of HFD
// on the reduced Bezier curve. We again have enough precision, but since
// its starting error is less than 2^11, we can reduce error to 2^-7 before
// overflowing! We'll start a low HFD after every step of the high HFD.
////////////////////////////////////////////////////////////////////////////
#[derive(Default)]
struct HfdBasis64
{
e0: LONGLONG,
e1: LONGLONG,
e2: LONGLONG,
e3: LONGLONG,
}
impl HfdBasis64 {
fn vParentError(&self) -> LONGLONG
{
(self.e3 << 2).abs().max(((self.e2 << 3) - (self.e3 << 2)).abs())
}
fn vError(&self) -> LONGLONG
{
self.e2.abs().max(self.e3.abs())
}
fn fxValue(&self) -> INT
{
// Convert from 36.28 and round:
let mut eq = self.e0;
eq += (1 << (BEZIER64_FRACTION - 1));
eq >>= BEZIER64_FRACTION;
return eq as LONG as INT;
}
fn vInit(&mut self, p1: INT, p2: INT, p3: INT, p4: INT)
{
let mut eqTmp;
let eqP2 = p2 as LONGLONG;
let eqP3 = p3 as LONGLONG;
// e0 = p1
// e1 = p4 - p1
// e2 = 6(p2 - 2p3 + p4)
// e3 = 6(p1 - 2p2 + p3)
// Change basis:
self.e0 = p1 as LONGLONG; // e0 = p1
self.e1 = p4 as LONGLONG;
self.e2 = eqP2; self.e2 -= eqP3; self.e2 -= eqP3; self.e2 += self.e1; // e2 = p2 - 2*p3 + p4
self.e3 = self.e0; self.e3 -= eqP2; self.e3 -= eqP2; self.e3 += eqP3; // e3 = p1 - 2*p2 + p3
self.e1 -= self.e0; // e1 = p4 - p1
// Convert to 36.28 format and multiply e2 and e3 by six:
self.e0 <<= BEZIER64_FRACTION;
self.e1 <<= BEZIER64_FRACTION;
eqTmp = self.e2; self.e2 += eqTmp; self.e2 += eqTmp; self.e2 <<= (BEZIER64_FRACTION + 1);
eqTmp = self.e3; self.e3 += eqTmp; self.e3 += eqTmp; self.e3 <<= (BEZIER64_FRACTION + 1);
}
fn vUntransform<F: Fn(&mut POINT) -> &mut LONG>(&self,
afx: &mut [POINT; 4], field: F)
{
// Declare some temps to hold our operations, since we can't modify e0..e3.
let mut eqP0;
let mut eqP1;
let mut eqP2;
let mut eqP3;
// p0 = e0
// p1 = e0 + (6e1 - e2 - 2e3)/18
// p2 = e0 + (12e1 - 2e2 - e3)/18
// p3 = e0 + e1
eqP0 = self.e0;
// NOTE PERF: Convert this to a multiply by 6: [andrewgo]
eqP2 = self.e1;
eqP2 += self.e1;
eqP2 += self.e1;
eqP1 = eqP2;
eqP1 += eqP2; // 6e1
eqP1 -= self.e2; // 6e1 - e2
eqP2 = eqP1;
eqP2 += eqP1; // 12e1 - 2e2
eqP2 -= self.e3; // 12e1 - 2e2 - e3
eqP1 -= self.e3;
eqP1 -= self.e3; // 6e1 - e2 - 2e3
// NOTE: May just want to approximate these divides! [andrewgo]
// Or can do a 64 bit divide by 32 bit to get 32 bits right here.
eqP1 /= 18;
eqP2 /= 18;
eqP1 += self.e0;
eqP2 += self.e0;
eqP3 = self.e0;
eqP3 += self.e1;
// Convert from 36.28 format with rounding:
eqP0 += (1 << (BEZIER64_FRACTION - 1)); eqP0 >>= BEZIER64_FRACTION; *field(&mut afx[0]) = eqP0 as LONG;
eqP1 += (1 << (BEZIER64_FRACTION - 1)); eqP1 >>= BEZIER64_FRACTION; *field(&mut afx[1]) = eqP1 as LONG;
eqP2 += (1 << (BEZIER64_FRACTION - 1)); eqP2 >>= BEZIER64_FRACTION; *field(&mut afx[2]) = eqP2 as LONG;
eqP3 += (1 << (BEZIER64_FRACTION - 1)); eqP3 >>= BEZIER64_FRACTION; *field(&mut afx[3]) = eqP3 as LONG;
}
fn vHalveStepSize(&mut self)
{
// e2 = (e2 + e3) >> 3
// e1 = (e1 - e2) >> 1
// e3 >>= 2
self.e2 += self.e3; self.e2 >>= 3;
self.e1 -= self.e2; self.e1 >>= 1;
self.e3 >>= 2;
}
fn vDoubleStepSize(&mut self)
{
// e1 = 2e1 + e2
// e3 = 4e3;
// e2 = 8e2 - e3
self.e1 <<= 1; self.e1 += self.e2;
self.e3 <<= 2;
self.e2 <<= 3; self.e2 -= self.e3;
}
fn vTakeStep(&mut self)
{
self.e0 += self.e1;
let eqTmp = self.e2;
self.e1 += self.e2;
self.e2 += eqTmp; self.e2 -= self.e3;
self.e3 = eqTmp;
}
}
const BEZIER64_FRACTION: LONG = 28;
// The following is our 2^11 target error encoded as a 36.28 number
// (don't forget the additional 4 bits of fractional precision!) and
// the 6 times error multiplier:
const geqErrorHigh: LONGLONG = (6 * (1 << 15) >> (32 - BEZIER64_FRACTION)) << 32;
/*#ifdef BEZIER_FLATTEN_GDI_COMPATIBLE
// The following is the default 2/3 error encoded as a 36.28 number,
// multiplied by 6, and leaving 4 bits for fraction:
const LONGLONG geqErrorLow = (LONGLONG)(4) << 32;
#else*/
// The following is the default 1/4 error encoded as a 36.28 number,
// multiplied by 6, and leaving 4 bits for fraction:
use crate::types::POINT;
const geqErrorLow: LONGLONG = (3) << 31;
//#endif
#[derive(Default)]
pub struct Bezier64
{
xLow: HfdBasis64,
yLow: HfdBasis64,
xHigh: HfdBasis64,
yHigh: HfdBasis64,
eqErrorLow: LONGLONG,
rcfxClip: Option<RECT>,
cStepsHigh: LONG,
cStepsLow: LONG
}
impl Bezier64 {
fn vInit(&mut self,
aptfx: &[POINT; 4],
// Pointer to 4 control points
prcfxVis: Option<&RECT>,
// Pointer to bound box of visible area (may be NULL)
eqError: LONGLONG)
// Fractional maximum error (32.32 format)
{
self.cStepsHigh = 1;
self.cStepsLow = 0;
self.xHigh.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
self.yHigh.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
// Initialize error:
self.eqErrorLow = eqError;
self.rcfxClip = prcfxVis.cloned();
while (((self.xHigh.vError()) > geqErrorHigh) ||
((self.yHigh.vError()) > geqErrorHigh))
{
self.cStepsHigh <<= 1;
self.xHigh.vHalveStepSize();
self.yHigh.vHalveStepSize();
}
}
fn cFlatten(
&mut self,
mut pptfx: &mut [POINT],
pbMore: &mut bool) -> INT
{
let mut aptfx: [POINT; 4] = Default::default();
let mut cptfx = pptfx.len();
let mut rcfxBound: RECT;
let cptfxOriginal = cptfx;
assert!(cptfx > 0);
while {
if (self.cStepsLow == 0)
{
// Optimization that if the bound box of the control points doesn't
// intersect with the bound box of the visible area, render entire
// curve as a single line:
self.xHigh.vUntransform(&mut aptfx, |p| &mut p.x);
self.yHigh.vUntransform(&mut aptfx, |p| &mut p.y);
self.xLow.vInit(aptfx[0].x, aptfx[1].x, aptfx[2].x, aptfx[3].x);
self.yLow.vInit(aptfx[0].y, aptfx[1].y, aptfx[2].y, aptfx[3].y);
self.cStepsLow = 1;
if (match &self.rcfxClip { None => true, Some(clip) => {rcfxBound = vBoundBox(&aptfx); bIntersect(&rcfxBound, &clip)}})
{
while (((self.xLow.vError()) > self.eqErrorLow) ||
((self.yLow.vError()) > self.eqErrorLow))
{
self.cStepsLow <<= 1;
self.xLow.vHalveStepSize();
self.yLow.vHalveStepSize();
}
}
// This 'if' handles the case where the initial error for the Bezier
// is already less than the target error:
if ({self.cStepsHigh -= 1; self.cStepsHigh} != 0)
{
self.xHigh.vTakeStep();
self.yHigh.vTakeStep();
if (((self.xHigh.vError()) > geqErrorHigh) ||
((self.yHigh.vError()) > geqErrorHigh))
{
self.cStepsHigh <<= 1;
self.xHigh.vHalveStepSize();
self.yHigh.vHalveStepSize();
}
while (!(self.cStepsHigh & 1 != 0) &&
((self.xHigh.vParentError()) <= geqErrorHigh) &&
((self.yHigh.vParentError()) <= geqErrorHigh))
{
self.xHigh.vDoubleStepSize();
self.yHigh.vDoubleStepSize();
self.cStepsHigh >>= 1;
}
}
}
self.xLow.vTakeStep();
self.yLow.vTakeStep();
pptfx[0].x = self.xLow.fxValue();
pptfx[0].y = self.yLow.fxValue();
pptfx = &mut pptfx[1..];
self.cStepsLow-=1;
if (self.cStepsLow == 0 && self.cStepsHigh == 0)
{
*pbMore = false;
// '+1' because we haven't decremented 'cptfx' yet:
return(cptfxOriginal - cptfx + 1) as INT;
}
if ((self.xLow.vError() > self.eqErrorLow) ||
(self.yLow.vError() > self.eqErrorLow))
{
self.cStepsLow <<= 1;
self.xLow.vHalveStepSize();
self.yLow.vHalveStepSize();
}
while (!(self.cStepsLow & 1 != 0) &&
((self.xLow.vParentError()) <= self.eqErrorLow) &&
((self.yLow.vParentError()) <= self.eqErrorLow))
{
self.xLow.vDoubleStepSize();
self.yLow.vDoubleStepSize();
self.cStepsLow >>= 1;
}
cptfx -= 1;
cptfx != 0
} {};
*pbMore = true;
return(cptfxOriginal) as INT;
}
}
//+-----------------------------------------------------------------------------
//
// class CMILBezier
//
// Bezier cracker. Flattens any Bezier in our 28.4 device space down to a
// smallest 'error' of 2^-7 = 0.0078. Will use fast 32 bit cracker for small
// curves and slower 64 bit cracker for big curves.
//
// Public Interface:
// vInit(aptfx, prcfxClip, peqError)
// - pptfx points to 4 control points of Bezier. The first point
//      retrieved by bNext() is the first point in the approximation
// after the start-point.
//
// - prcfxClip is an optional pointer to the bound box of the visible
// region. This is used to optimize clipping of Bezier curves that
// won't be seen. Note that this value should account for the pen's
// width!
//
// - optional maximum error in 32.32 format, corresponding to Kirko's
// error factor.
//
// bNext(pptfx)
// - pptfx points to where next point in approximation will be
// returned. Returns FALSE if the point is the end-point of the
// curve.
//
pub (crate) enum CMILBezier
{
Bezier64(Bezier64),
Bezier32(Bezier32)
}
impl CMILBezier {
// All coordinates must be in 28.4 format:
pub fn new(aptfxBez: &[POINT; 4], prcfxClip: Option<&RECT>) -> Self {
let mut bez32 = Bezier32::default();
let bBez32 = bez32.bInit(aptfxBez, prcfxClip);
if bBez32 {
CMILBezier::Bezier32(bez32)
} else {
let mut bez64 = Bezier64::default();
bez64.vInit(aptfxBez, prcfxClip, geqErrorLow);
CMILBezier::Bezier64(bez64)
}
}
// Returns the number of points filled in. This will never be zero.
//
// The last point returned may not be exactly the last control
// point. The workaround is for calling code to add an extra
// point if this is the case.
pub fn Flatten( &mut self,
pptfx: &mut [POINT],
pbMore: &mut bool) -> INT {
match self {
CMILBezier::Bezier32(bez) => bez.cFlatten(pptfx, pbMore),
CMILBezier::Bezier64(bez) => bez.cFlatten(pptfx, pbMore)
}
}
}
#[test]
fn flatten() {
let curve: [POINT; 4] = [
POINT{x: 1715, y: 6506},
POINT{x: 1692, y: 6506},
POINT{x: 1227, y: 5148},
POINT{x: 647, y: 5211}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 32] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result, &mut more);
assert_eq!(count, 21);
assert_eq!(more, false);
}
#[test]
fn split_flatten32() {
// make sure that flattening a curve into two small buffers matches
// doing it into a large buffer
let curve: [POINT; 4] = [
POINT{x: 1795, y: 8445},
POINT{x: 1795, y: 8445},
POINT{x: 1908, y: 8683},
POINT{x: 2043, y: 8705}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 8] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result[..5], &mut more);
assert_eq!(count, 5);
assert_eq!(more, true);
let count = bez.Flatten(&mut result[5..], &mut more);
assert_eq!(count, 3);
assert_eq!(more, false);
let mut bez = CMILBezier::new(&curve, None);
let mut full_result: [POINT; 8] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut full_result, &mut more);
assert_eq!(count, 8);
assert_eq!(more, false);
assert!(result == full_result);
}
#[test]
fn flatten32() {
let curve: [POINT; 4] = [
POINT{x: 100, y: 100},
POINT{x: 110, y: 100},
POINT{x: 110, y: 110},
POINT{x: 110, y: 100}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 32] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result, &mut more);
assert_eq!(count, 3);
assert_eq!(more, false);
}
#[test]
fn flatten32_double_step_size() {
let curve: [POINT; 4] = [
POINT{x: 1761, y: 8152},
POINT{x: 1761, y: 8152},
POINT{x: 1750, y: 8355},
POINT{x: 1795, y: 8445}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 32] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result, &mut more);
assert_eq!(count, 7);
assert_eq!(more, false);
}
#[test]
fn bezier64_init_high_num_steps() {
let curve: [POINT; 4] = [
POINT{x: 33, y: -1},
POINT{x: -1, y: -1},
POINT{x: -1, y: -16385},
POINT{x: -226, y: 10}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 32] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result, &mut more);
assert_eq!(count, 32);
assert_eq!(more, true);
}
#[test]
fn bezier64_high_error() {
let curve: [POINT; 4] = [
POINT{x: -1, y: -1},
POINT{x: -4097, y: -1},
POINT{x: 65471, y: -256},
POINT{x: -1, y: 0}];
let mut bez = CMILBezier::new(&curve, None);
let mut result: [POINT; 32] = Default::default();
let mut more: bool = false;
let count = bez.Flatten(&mut result, &mut more);
assert_eq!(count, 32);
assert_eq!(more, true);
}


@ -0,0 +1,133 @@
use crate::{PathBuilder, OutputPath, OutputVertex, FillMode, rasterize_to_tri_list};
use crate::types::{BYTE, POINT};
#[no_mangle]
pub extern "C" fn wgr_new_builder() -> *mut PathBuilder {
let pb = PathBuilder::new();
Box::into_raw(Box::new(pb))
}
#[no_mangle]
pub extern "C" fn wgr_builder_move_to(pb: &mut PathBuilder, x: f32, y: f32) {
pb.move_to(x, y);
}
#[no_mangle]
pub extern "C" fn wgr_builder_line_to(pb: &mut PathBuilder, x: f32, y: f32) {
pb.line_to(x, y);
}
#[no_mangle]
pub extern "C" fn wgr_builder_curve_to(pb: &mut PathBuilder, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
pb.curve_to(c1x, c1y, c2x, c2y, x, y);
}
#[no_mangle]
pub extern "C" fn wgr_builder_quad_to(pb: &mut PathBuilder, cx: f32, cy: f32, x: f32, y: f32) {
pb.quad_to(cx, cy, x, y);
}
#[no_mangle]
pub extern "C" fn wgr_builder_close(pb: &mut PathBuilder) {
pb.close();
}
#[no_mangle]
pub extern "C" fn wgr_builder_set_fill_mode(pb: &mut PathBuilder, fill_mode: FillMode) {
pb.set_fill_mode(fill_mode)
}
#[repr(C)]
pub struct Path {
fill_mode: FillMode,
points: *const POINT,
num_points: usize,
types: *const BYTE,
num_types: usize,
}
impl From<OutputPath> for Path {
fn from(output_path: OutputPath) -> Self {
let path = Self {
fill_mode: output_path.fill_mode,
points: output_path.points.as_ptr(),
num_points: output_path.points.len(),
types: output_path.types.as_ptr(),
num_types: output_path.types.len(),
};
std::mem::forget(output_path);
path
}
}
impl Into<OutputPath> for Path {
fn into(self) -> OutputPath {
OutputPath {
fill_mode: self.fill_mode,
points: unsafe {
if self.points == std::ptr::null() {
Default::default()
} else {
Box::from_raw(std::slice::from_raw_parts_mut(self.points as *mut POINT, self.num_points))
}
},
types: unsafe {
if self.types == std::ptr::null() {
Default::default()
} else {
Box::from_raw(std::slice::from_raw_parts_mut(self.types as *mut BYTE, self.num_types))
}
},
}
}
}
#[no_mangle]
pub extern "C" fn wgr_builder_get_path(pb: &mut PathBuilder) -> Path {
Path::from(pb.get_path().unwrap_or_default())
}
#[repr(C)]
pub struct VertexBuffer {
data: *const OutputVertex,
len: usize
}
#[no_mangle]
pub extern "C" fn wgr_path_rasterize_to_tri_list(
path: &Path,
clip_x: i32,
clip_y: i32,
clip_width: i32,
clip_height: i32,
need_inside: bool,
need_outside: bool,
) -> VertexBuffer {
let result = rasterize_to_tri_list(
path.fill_mode,
unsafe { std::slice::from_raw_parts(path.types, path.num_types) },
unsafe { std::slice::from_raw_parts(path.points, path.num_points) },
clip_x, clip_y, clip_width, clip_height,
need_inside, need_outside,
);
let vb = VertexBuffer { data: result.as_ptr(), len: result.len() };
std::mem::forget(result);
vb
}
#[no_mangle]
pub extern "C" fn wgr_path_release(path: Path) {
let output_path: OutputPath = path.into();
drop(output_path);
}
#[no_mangle]
pub extern "C" fn wgr_vertex_buffer_release(vb: VertexBuffer)
{
unsafe { drop(Box::from_raw(std::slice::from_raw_parts_mut(vb.data as *mut OutputVertex, vb.len))) }
}
#[no_mangle]
pub unsafe extern "C" fn wgr_builder_release(pb: *mut PathBuilder) {
drop(Box::from_raw(pb));
}
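As a usage sketch only (not part of the crate), this is the call sequence a C consumer of the bindings above would make, written here in Rust because the functions are ordinary `pub extern "C"` items; it assumes the `c_bindings` feature is enabled. The `Path` and `VertexBuffer` fields are private on the Rust side and are meant to be read through their `#[repr(C)]` layout by the C caller, so the sketch only shows the ownership handoff and the matching release calls.
```rust
use wpf_gpu_raster::c_bindings::*;

fn main() {
    let pb = wgr_new_builder();
    unsafe {
        wgr_builder_move_to(&mut *pb, 10., 10.);
        wgr_builder_line_to(&mut *pb, 40., 10.);
        wgr_builder_line_to(&mut *pb, 40., 40.);
        wgr_builder_close(&mut *pb);

        let path = wgr_builder_get_path(&mut *pb);
        let vb = wgr_path_rasterize_to_tri_list(&path, 0, 0, 100, 100,
                                                /* need_inside */ true,
                                                /* need_outside */ false);
        // A C caller would read vb.data / vb.len as an OutputVertex array here.
        wgr_vertex_buffer_release(vb);
        wgr_path_release(path);
        wgr_builder_release(pb);
    }
}
```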


@ -0,0 +1,9 @@
use crate::types::*;
type FIX4 = INT; // 28.4 fixed point value
// constants for working with 28.4 fixed point values
macro_rules! FIX4_SHIFT { () => { 4 } }
macro_rules! FIX4_PRECISION { () => { 4 } }
macro_rules! FIX4_ONE { () => { (1 << FIX4_PRECISION!()) } }
macro_rules! FIX4_HALF { () => { (1 << (FIX4_PRECISION!()-1)) } }
macro_rules! FIX4_MASK { () => { (FIX4_ONE!() - 1) } }
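A worked example of the 28.4 encoding these constants describe, using plain arithmetic rather than the macros (which are crate-internal): one pixel unit is 16 sub-units.
```rust
fn demo_28_4() {
    let fixed = (10.25f32 * 16.0) as i32; // 10.25 encoded in 28.4 form is 164
    assert_eq!(fixed >> 4, 10);           // integer part (shift by FIX4_SHIFT)
    assert_eq!(fixed & 15, 4);            // fractional part (FIX4_MASK); 4/16 == 0.25
}
```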


@ -0,0 +1,92 @@
use crate::aacoverage::CCoverageInterval;
use crate::nullable_ref::Ref;
use crate::types::*;
pub trait IGeometrySink
{
//
// Aliased geometry output
//
/*
virtual HRESULT AddVertex(
__in_ecount(1) const MilPoint2F &ptPosition,
// In: Vertex coordinates
__out_ecount(1) WORD *pidxOut
// Out: Index of vertex
) PURE;
virtual HRESULT AddIndexedVertices(
UINT cVertices,
// In: number of vertices
__in_bcount(cVertices*uVertexStride) const void *pVertexBuffer,
// In: vertex buffer containing the vertices
UINT uVertexStride,
// In: size of each vertex
MilVertexFormat mvfFormat,
// In: format of each vertex
UINT cIndices,
// In: Number of indices
__in_ecount(cIndices) const UINT *puIndexBuffer
// In: index buffer
) PURE;
virtual void SetTransformMapping(
__in_ecount(1) const MILMatrix3x2 &mat2DTransform
) PURE;
virtual HRESULT AddTriangle(
DWORD idx1,
// In: Index of triangle's first vertex
DWORD idx2,
// In: Index of triangle's second vertex
DWORD idx3
// In: Index of triangle's third vertex
) PURE;
//
// Trapezoidal AA geometry output
//
*/
fn AddComplexScan(&mut self,
nPixelY: INT,
// In: y coordinate in pixel space
pIntervalSpanStart: Ref<CCoverageInterval>
// In: coverage segments
) -> HRESULT;
fn AddTrapezoid(
&mut self,
rYMin: f32,
// In: y coordinate of top of trapezoid
rXLeftYMin: f32,
// In: x coordinate for top left
rXRightYMin: f32,
// In: x coordinate for top right
rYMax: f32,
// In: y coordinate of bottom of trapezoid
rXLeftYMax: f32,
// In: x coordinate for bottom left
rXRightYMax: f32,
// In: x coordinate for bottom right
rXDeltaLeft: f32,
// In: trapezoid expand radius
rXDeltaRight: f32
// In: trapezoid expand radius
) -> HRESULT;
fn IsEmpty(&self) -> bool;
/*
virtual HRESULT AddParallelogram(
__in_ecount(4) const MilPoint2F *rgPosition
) PURE;
//
// Query sink status
//
// Some geometry generators don't actually know if they have output
// any triangles, so they need to get this information from the geometry sink.
virtual BOOL IsEmpty() PURE;
*/
}
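A hypothetical sink, shown only to illustrate how the trait is consumed (it is not part of the crate, and would have to live inside it since `INT`, `HRESULT`, `S_OK`, `Ref` and `CCoverageInterval` are crate-internal): it records trapezoids and ignores complex scans.
```rust
struct RecordingSink {
    traps: Vec<(f32, f32, f32, f32, f32, f32)>,
}

impl IGeometrySink for RecordingSink {
    fn AddComplexScan(&mut self, _nPixelY: INT,
                      _pIntervalSpanStart: Ref<CCoverageInterval>) -> HRESULT {
        S_OK // this sketch only cares about the trapezoidal output
    }
    fn AddTrapezoid(&mut self,
                    rYMin: f32, rXLeftYMin: f32, rXRightYMin: f32,
                    rYMax: f32, rXLeftYMax: f32, rXRightYMax: f32,
                    _rXDeltaLeft: f32, _rXDeltaRight: f32) -> HRESULT {
        self.traps.push((rYMin, rXLeftYMin, rXRightYMin, rYMax, rXLeftYMax, rXRightYMax));
        S_OK
    }
    fn IsEmpty(&self) -> bool {
        self.traps.is_empty()
    }
}
```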


@ -0,0 +1,55 @@
pub fn Int32x32To64(a: i32, b: i32) -> i64 { a as i64 * b as i64 }
macro_rules! IsTagEnabled {
($e: expr) => {
false
}
}
macro_rules! TraceTag {
(($e: expr, $s: expr)) => {
dbg!($s)
}
}
macro_rules! IFC {
($e: expr) => {
assert_eq!($e, S_OK);
}
}
macro_rules! IFR {
($e: expr) => {
let hresult = $e;
if (hresult != S_OK) { return hresult }
}
}
macro_rules! __analysis_assume {
($e: expr) => {
}
}
macro_rules! IFCOOM {
($e: expr) => {
assert_ne!($e, NULL());
}
}
macro_rules! RRETURN1 {
($e: expr, $s1: expr) => {
if $e == $s1 {
} else {
assert_eq!($e, S_OK);
}
return $e;
}
}
macro_rules! RRETURN {
($e: expr) => {
assert_eq!($e, S_OK);
return $e;
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,669 @@
/*!
Converts a 2D path into a set of vertices of a triangle strip mesh that represents the antialiased fill of that path.
```rust
use wpf_gpu_raster::PathBuilder;
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 40.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
```
*/
#![allow(unused_parens)]
#![allow(overflowing_literals)]
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(dead_code)]
#![allow(unused_macros)]
#[macro_use]
mod fix;
#[macro_use]
mod helpers;
#[macro_use]
mod real;
mod bezier;
#[macro_use]
mod aarasterizer;
mod hwrasterizer;
mod aacoverage;
mod hwvertexbuffer;
mod types;
mod geometry_sink;
mod matrix;
mod nullable_ref;
#[cfg(feature = "c_bindings")]
pub mod c_bindings;
#[cfg(test)]
mod tri_rasterize;
use std::{rc::Rc, cell::RefCell};
use aarasterizer::CheckValidRange28_4;
use hwrasterizer::CHwRasterizer;
use hwvertexbuffer::CHwVertexBufferBuilder;
use matrix::CMatrix;
use real::CFloatFPU;
use types::{CoordinateSpace, CD3DDeviceLevel1, MilFillMode, PathPointTypeStart, MilPoint2F, PathPointTypeLine, MilVertexFormat, MilVertexFormatAttribute, DynArray, BYTE, PathPointTypeBezier, PathPointTypeCloseSubpath, CMILSurfaceRect, POINT};
#[repr(C)]
#[derive(Debug, Default)]
pub struct OutputVertex {
pub x: f32,
pub y: f32,
pub coverage: f32
}
#[repr(C)]
#[derive(Copy, Clone)]
pub enum FillMode {
EvenOdd = 0,
Winding = 1,
}
impl Default for FillMode {
fn default() -> Self {
FillMode::EvenOdd
}
}
#[derive(Clone, Default)]
pub struct OutputPath {
fill_mode: FillMode,
points: Box<[POINT]>,
types: Box<[BYTE]>,
}
impl std::hash::Hash for OutputVertex {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.x.to_bits().hash(state);
self.y.to_bits().hash(state);
self.coverage.to_bits().hash(state);
}
}
pub struct PathBuilder {
points: DynArray<POINT>,
types: DynArray<BYTE>,
initial_point: Option<MilPoint2F>,
current_point: Option<MilPoint2F>,
in_shape: bool,
fill_mode: FillMode,
outside_bounds: Option<CMILSurfaceRect>,
need_inside: bool,
valid_range: bool,
}
impl PathBuilder {
pub fn new() -> Self {
Self {
points: Vec::new(),
types: Vec::new(),
initial_point: None,
current_point: None,
in_shape: false,
fill_mode: FillMode::EvenOdd,
outside_bounds: None,
need_inside: true,
valid_range: true,
}
}
fn add_point(&mut self, x: f32, y: f32) {
self.current_point = Some(MilPoint2F{X: x, Y: y});
// Transform from pixel corner at 0.0 to pixel center at 0.0. Scale into 28.4 range.
// Validate that the point before rounding is within expected bounds for the rasterizer.
let (x, y) = ((x - 0.5) * 16.0, (y - 0.5) * 16.0);
self.valid_range = self.valid_range && CheckValidRange28_4(x, y);
self.points.push(POINT {
x: CFloatFPU::Round(x),
y: CFloatFPU::Round(y),
});
}
pub fn line_to(&mut self, x: f32, y: f32) {
if let Some(initial_point) = self.initial_point {
if !self.in_shape {
self.types.push(PathPointTypeStart);
self.add_point(initial_point.X, initial_point.Y);
self.in_shape = true;
}
self.types.push(PathPointTypeLine);
self.add_point(x, y);
} else {
self.initial_point = Some(MilPoint2F{X: x, Y: y})
}
}
pub fn move_to(&mut self, x: f32, y: f32) {
self.in_shape = false;
self.initial_point = Some(MilPoint2F{X: x, Y: y});
self.current_point = self.initial_point;
}
pub fn curve_to(&mut self, c1x: f32, c1y: f32, c2x: f32, c2y: f32, x: f32, y: f32) {
let initial_point = match self.initial_point {
Some(initial_point) => initial_point,
None => MilPoint2F{X:c1x, Y:c1y}
};
if !self.in_shape {
self.types.push(PathPointTypeStart);
self.add_point(initial_point.X, initial_point.Y);
self.initial_point = Some(initial_point);
self.in_shape = true;
}
self.types.push(PathPointTypeBezier);
self.add_point(c1x, c1y);
self.add_point(c2x, c2y);
self.add_point(x, y);
}
pub fn quad_to(&mut self, cx: f32, cy: f32, x: f32, y: f32) {
// For now we just implement quad_to on top of curve_to.
// Long term we probably want to support quad curves
// directly.
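        // Degree elevation: the quadratic with control points q0 = current point,
        // q1 = (cx, cy) and q2 = (x, y) is exactly the cubic with
        // p1 = q0 + (2/3)(q1 - q0) and p2 = q2 + (2/3)(q1 - q2),
        // which is what the arithmetic below computes.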
let c0 = match self.current_point {
Some(current_point) => current_point,
None => MilPoint2F{X:cx, Y:cy}
};
let c1x = c0.X + (2./3.) * (cx - c0.X);
let c1y = c0.Y + (2./3.) * (cy - c0.Y);
let c2x = x + (2./3.) * (cx - x);
let c2y = y + (2./3.) * (cy - y);
self.curve_to(c1x, c1y, c2x, c2y, x, y);
}
pub fn close(&mut self) {
if let Some(last) = self.types.last_mut() {
*last |= PathPointTypeCloseSubpath;
}
self.in_shape = false;
self.initial_point = None;
}
pub fn set_fill_mode(&mut self, fill_mode: FillMode) {
self.fill_mode = fill_mode;
}
/// Enables rendering geometry for areas outside the shape but
/// within the bounds. These areas will be created with
/// zero alpha.
///
/// This is useful for creating geometry for other blend modes.
/// For example:
/// - `IN(dest, geometry)` can be done with `outside_bounds` and `need_inside = false`
/// - `IN(dest, geometry, alpha)` can be done with `outside_bounds` and `need_inside = true`
///
/// Note: trapezoidal areas won't be clipped to outside_bounds
pub fn set_outside_bounds(&mut self, outside_bounds: Option<(i32, i32, i32, i32)>, need_inside: bool) {
self.outside_bounds = outside_bounds.map(|r| CMILSurfaceRect { left: r.0, top: r.1, right: r.2, bottom: r.3 });
self.need_inside = need_inside;
}
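A hedged usage sketch of the mode described above (the shape and bounds are illustrative): request only the zero-alpha geometry between a small triangle and a 100x100 bound, which is the `IN(dest, geometry)` case with `need_inside = false`.
```rust
use wpf_gpu_raster::{FillMode, PathBuilder};

fn exterior_only() -> Box<[wpf_gpu_raster::OutputVertex]> {
    let mut p = PathBuilder::new();
    p.set_fill_mode(FillMode::Winding);
    p.set_outside_bounds(Some((0, 0, 100, 100)), false);
    p.move_to(10., 10.);
    p.line_to(40., 10.);
    p.line_to(40., 40.);
    p.close();
    p.rasterize_to_tri_list(0, 0, 100, 100)
}
```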
/// Note: trapezoidal areas won't necessarily be clipped to the clip rect
pub fn rasterize_to_tri_list(&self, clip_x: i32, clip_y: i32, clip_width: i32, clip_height: i32) -> Box<[OutputVertex]> {
if !self.valid_range {
// If any of the points are outside of valid 28.4 range, then just return an empty triangle list.
return Box::new([]);
}
let (x, y, width, height, need_outside) = if let Some(CMILSurfaceRect { left, top, right, bottom }) = self.outside_bounds {
let x0 = clip_x.max(left);
let y0 = clip_y.max(top);
let x1 = (clip_x + clip_width).min(right);
let y1 = (clip_y + clip_height).min(bottom);
(x0, y0, x1 - x0, y1 - y0, true)
} else {
(clip_x, clip_y, clip_width, clip_height, false)
};
rasterize_to_tri_list(self.fill_mode, &self.types, &self.points, x, y, width, height, self.need_inside, need_outside)
}
pub fn get_path(&mut self) -> Option<OutputPath> {
if self.valid_range && !self.points.is_empty() && !self.types.is_empty() {
Some(OutputPath {
fill_mode: self.fill_mode,
points: std::mem::take(&mut self.points).into_boxed_slice(),
types: std::mem::take(&mut self.types).into_boxed_slice(),
})
} else {
None
}
}
}
// Rasterizes a path specified as an array of edge types, each associated with a fixed number
// of points serialized to the points array, into a triangle list. Edge types are specified via
// PathPointType masks, and points must be supplied in 28.4 signed fixed-point format. By default
// the inside of the path is filled and the outside is excluded. Alternatively, the outside of the
// path may be filled out to the clip boundary, optionally keeping the inside. PathBuilder may be
// used instead as a simpler interface to this function that handles building the path arrays.
pub fn rasterize_to_tri_list(
fill_mode: FillMode,
types: &[BYTE],
points: &[POINT],
clip_x: i32,
clip_y: i32,
clip_width: i32,
clip_height: i32,
need_inside: bool,
need_outside: bool,
) -> Box<[OutputVertex]> {
let mut rasterizer = CHwRasterizer::new();
let mut device = CD3DDeviceLevel1::new();
device.clipRect.X = clip_x;
device.clipRect.Y = clip_y;
device.clipRect.Width = clip_width;
device.clipRect.Height = clip_height;
let device = Rc::new(device);
/*
device.m_rcViewport = device.clipRect;
*/
let worldToDevice: CMatrix<CoordinateSpace::Shape, CoordinateSpace::Device> = CMatrix::Identity();
let mil_fill_mode = match fill_mode {
FillMode::EvenOdd => MilFillMode::Alternate,
FillMode::Winding => MilFillMode::Winding,
};
rasterizer.Setup(device.clone(), mil_fill_mode, Some(&worldToDevice));
let mut m_mvfIn: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat;
let m_mvfGenerated: MilVertexFormat = MilVertexFormatAttribute::MILVFAttrNone as MilVertexFormat;
//let mvfaAALocation = MILVFAttrNone;
const HWPIPELINE_ANTIALIAS_LOCATION: MilVertexFormatAttribute = MilVertexFormatAttribute::MILVFAttrDiffuse;
let mvfaAALocation = HWPIPELINE_ANTIALIAS_LOCATION;
struct CHwPipeline {
m_pDevice: Rc<CD3DDeviceLevel1>
}
let pipeline = CHwPipeline { m_pDevice: device.clone() };
let m_pHP = &pipeline;
rasterizer.GetPerVertexDataType(&mut m_mvfIn);
let vertexBuilder = Rc::new(RefCell::new(CHwVertexBufferBuilder::Create(m_mvfIn, m_mvfIn | m_mvfGenerated,
mvfaAALocation,
m_pHP.m_pDevice.clone())));
let outside_bounds = if need_outside {
Some(CMILSurfaceRect {
left: clip_x,
top: clip_y,
right: clip_x + clip_width,
bottom: clip_y + clip_height,
})
} else {
None
};
vertexBuilder.borrow_mut().SetOutsideBounds(outside_bounds.as_ref(), need_inside);
vertexBuilder.borrow_mut().BeginBuilding();
rasterizer.SendGeometry(vertexBuilder.clone(), points, types);
vertexBuilder.borrow_mut().FlushTryGetVertexBuffer(None);
device.output.replace(Vec::new()).into_boxed_slice()
}
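// A minimal sketch (illustrative, with hand-built arrays): the same 20x20 square
// as the `basic` test below, but passing the type/point arrays directly. Each
// Start/Line type entry owns exactly one point, and points are in 28.4 fixed
// point (device units multiplied by 16).
#[cfg(test)]
#[test]
fn raw_arrays_sketch() {
    let types = [
        PathPointTypeStart,
        PathPointTypeLine,
        PathPointTypeLine,
        PathPointTypeLine | PathPointTypeCloseSubpath,
    ];
    let points = [
        POINT { x: 10 * 16, y: 10 * 16 },
        POINT { x: 10 * 16, y: 30 * 16 },
        POINT { x: 30 * 16, y: 30 * 16 },
        POINT { x: 30 * 16, y: 10 * 16 },
    ];
    let tris = rasterize_to_tri_list(FillMode::EvenOdd, &types, &points,
                                     0, 0, 100, 100, true, false);
    // A filled square should produce a non-empty triangle list.
    assert!(!tris.is_empty());
}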
#[cfg(test)]
mod tests {
use std::{hash::{Hash, Hasher}, collections::hash_map::DefaultHasher};
use crate::{*, tri_rasterize::rasterize_to_mask};
fn calculate_hash<T: Hash>(t: &T) -> u64 {
let mut s = DefaultHasher::new();
t.hash(&mut s);
s.finish()
}
#[test]
fn basic() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(10., 30.);
p.line_to(30., 30.);
p.line_to(30., 10.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 18);
//assert_eq!(dbg!(calculate_hash(&result)), 0x5851570566450135);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfbb7c3932059e240);
}
#[test]
fn simple() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 40.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
//assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
}
#[test]
fn rust() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 40.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
//assert_eq!(dbg!(calculate_hash(&result)), 0x81a9af7769f88e68);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6d1595533d40ef92);
}
#[test]
fn fill_mode() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 40.);
p.line_to(10., 40.);
p.close();
p.move_to(15., 15.);
p.line_to(35., 15.);
p.line_to(35., 35.);
p.line_to(15., 35.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
//assert_eq!(dbg!(calculate_hash(&result)), 0xb34344234f2f75a8);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xc7bf999c56ccfc34);
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 40.);
p.line_to(10., 40.);
p.close();
p.move_to(15., 15.);
p.line_to(35., 15.);
p.line_to(35., 35.);
p.line_to(15., 35.);
p.close();
p.set_fill_mode(FillMode::Winding);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
//assert_eq!(dbg!(calculate_hash(&result)), 0xee4ecd8a738fc42c);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfafad659db9a2efd);
}
#[test]
fn range() {
// test for a start point out of range
let mut p = PathBuilder::new();
p.curve_to(8.872974e16, 0., 0., 0., 0., 0.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 0);
// test for a subsequent point out of range
let mut p = PathBuilder::new();
p.curve_to(0., 0., 8.872974e16, 0., 0., 0.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 0);
}
#[test]
fn multiple_starts() {
let mut p = PathBuilder::new();
p.line_to(10., 10.);
p.move_to(0., 0.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 0);
}
#[test]
fn path_closing() {
let mut p = PathBuilder::new();
p.curve_to(0., 0., 0., 0., 0., 32.0);
p.close();
p.curve_to(0., 0., 0., 0., 0., 32.0);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 0);
}
#[test]
fn curve() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.curve_to(40., 10., 40., 10., 40., 40.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(dbg!(calculate_hash(&result)), 0x8dbc4d23f9bba38d);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xa92aae8dba7b8cd4);
}
#[test]
fn partial_coverage_last_line() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(40., 39.6);
p.line_to(10., 39.6);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 21);
assert_eq!(dbg!(calculate_hash(&result)), 0xf90cb6afaadfb559);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0xfa200c3bae144952);
}
#[test]
fn delta_upper_bound() {
let mut p = PathBuilder::new();
p.move_to(-122.3 + 200.,84.285);
p.curve_to(-122.3 + 200., 84.285, -122.2 + 200.,86.179, -123.03 + 200., 86.16);
p.curve_to(-123.85 + 200., 86.141, -140.3 + 200., 38.066, -160.83 + 200., 40.309);
p.curve_to(-160.83 + 200., 40.309, -143.05 + 200., 32.956, -122.3 + 200., 84.285);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 400, 400);
assert_eq!(result.len(), 429);
assert_eq!(dbg!(calculate_hash(&result)), 0x52d52992e249587a);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5e82d98fdb47a796);
}
#[test]
fn self_intersect() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(10., 40.);
p.line_to(40., 40.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(dbg!(calculate_hash(&result)), 0xf10babef5c619d19);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
}
#[test]
fn grid() {
let mut p = PathBuilder::new();
for i in 0..200 {
let offset = i as f32 * 1.3;
p.move_to(0. + offset, -8.);
p.line_to(0.5 + offset, -8.);
p.line_to(0.5 + offset, 40.);
p.line_to(0. + offset, 40.);
p.close();
}
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 12000);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x5a7df39d9e9292f0);
}
#[test]
fn outside() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(10., 40.);
p.line_to(40., 40.);
p.close();
p.set_outside_bounds(Some((0, 0, 50, 50)), false);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(dbg!(calculate_hash(&result)), 0x7c5750ee536ae4ee);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
// ensure that adjusting the outside bounds changes the results
p.set_outside_bounds(Some((5, 5, 50, 50)), false);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(dbg!(calculate_hash(&result)), 0x55441457b28613e0);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x59403ddbb7e1d09a);
}
#[test]
fn outside_inside() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(40., 10.);
p.line_to(10., 40.);
p.line_to(40., 40.);
p.close();
p.set_outside_bounds(Some((0, 0, 50, 50)), true);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(dbg!(calculate_hash(&result)), 0xaf76b42a5244d1ec);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x49ecc769e1d4ec01);
}
#[test]
fn outside_clipped() {
let mut p = PathBuilder::new();
p.move_to(10., 10.);
p.line_to(10., 40.);
p.line_to(90., 40.);
p.line_to(40., 10.);
p.close();
p.set_outside_bounds(Some((0, 0, 50, 50)), false);
let result = p.rasterize_to_tri_list(0, 0, 50, 50);
assert_eq!(dbg!(calculate_hash(&result)), 0x648a0b7b6aa3b4ed);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x3d2a08f5d0bac999);
}
#[test]
fn clip_edge() {
let mut p = PathBuilder::new();
// tests the bigNumerator < 0 case of aarasterizer::ClipEdge
p.curve_to(-24., -10., -300., 119., 0.0, 0.0);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
// The edge merging only happens between points inside the enumerate buffer. This means
// that the vertex output can depend on the size of the enumerate buffer, because the
// number of edges and the positions of vertices will change depending on edge merging.
if ENUMERATE_BUFFER_NUMBER!() == 32 {
assert_eq!(result.len(), 111);
} else {
assert_eq!(result.len(), 171);
}
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x50b887b09a4c16e);
}
#[test]
fn enum_buffer_num() {
let mut p = PathBuilder::new();
p.curve_to(0.0, 0.0, 0.0, 12.0, 0.0, 44.919434);
p.line_to(64.0, 36.0);
p.line_to(0.0, 80.0);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 300);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x659cc742f16b42f2);
}
#[test]
fn fill_alternating_empty_interior_pairs() {
let mut p = PathBuilder::new();
p.line_to(0., 2.);
p.curve_to(0.0, 0.0, 1., 6., 0.0, 0.0);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 9);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x726606a662fe46a0);
}
#[test]
fn fill_winding_empty_interior_pairs() {
let mut p = PathBuilder::new();
p.curve_to(45., 61., 0.09, 0., 0., 0.);
p.curve_to(45., 61., 0.09, 0., 0., 0.);
p.curve_to(0., 0., 0., 38., 0.09, 15.);
p.set_fill_mode(FillMode::Winding);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 462);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x651ea4ade5543087);
}
#[test]
fn empty_fill() {
let mut p = PathBuilder::new();
p.move_to(0., 0.);
p.line_to(10., 100.);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 0);
}
#[test]
fn rasterize_line() {
let mut p = PathBuilder::new();
p.move_to(1., 1.);
p.line_to(2., 1.);
p.line_to(2., 2.);
p.line_to(1., 2.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
let mask = rasterize_to_mask(&result, 3, 3);
assert_eq!(&mask[..], &[0, 0, 0,
0, 255, 0,
0, 0, 0][..]);
}
#[test]
fn triangle() {
let mut p = PathBuilder::new();
p.move_to(1., 10.);
p.line_to(100., 13.);
p.line_to(1., 16.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x4757b0c5a19b02f0);
}
#[test]
fn single_pixel() {
let mut p = PathBuilder::new();
p.move_to(1.5, 1.5);
p.line_to(2., 1.5);
p.line_to(2., 2.);
p.line_to(1.5, 2.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(result.len(), 3);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 4, 4)), 0x9f481fe5588e341c);
}
#[test]
fn traps_outside_bounds() {
let mut p = PathBuilder::new();
p.move_to(10., 10.0);
p.line_to(30., 10.);
p.line_to(50., 20.);
p.line_to(30., 30.);
p.line_to(10., 30.);
p.close();
// The generated trapezoids are not necessarily clipped to the outside bounds rect
// and in this case the outside bounds geometry ends up drawing on top of the
// edge geometry which could be considered a bug.
p.set_outside_bounds(Some((0, 0, 50, 30)), true);
let result = p.rasterize_to_tri_list(0, 0, 100, 100);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 100, 100)), 0x6514e3d79d641f09);
}
#[test]
fn quad_to() {
let mut p = PathBuilder::new();
p.move_to(10., 10.0);
p.quad_to(30., 10., 30., 30.);
p.quad_to(10., 30., 30., 30.);
p.quad_to(60., 30., 60., 10.);
p.close();
let result = p.rasterize_to_tri_list(0, 0, 70, 40);
assert_eq!(result.len(), 279);
assert_eq!(calculate_hash(&rasterize_to_mask(&result, 70, 40)), 0xbd2eec3cfe9bd30b);
}
}

View file

@ -0,0 +1,37 @@
use std::marker::PhantomData;
use crate::types::CoordinateSpace;
pub type CMILMatrix = CMatrix<CoordinateSpace::Shape,CoordinateSpace::Device>;
#[derive(Default, Clone)]
pub struct CMatrix<InCoordSpace, OutCoordSpace> {
_11: f32, _12: f32, _13: f32, _14: f32,
_21: f32, _22: f32, _23: f32, _24: f32,
_31: f32, _32: f32, _33: f32, _34: f32,
_41: f32, _42: f32, _43: f32, _44: f32,
in_coord: PhantomData<InCoordSpace>,
out_coord: PhantomData<OutCoordSpace>
}
impl<InCoordSpace: Default, OutCoordSpace: Default> CMatrix<InCoordSpace, OutCoordSpace> {
pub fn Identity() -> Self {
let mut ret: Self = Default::default();
ret._11 = 1.;
ret._22 = 1.;
ret._33 = 1.;
ret._44 = 1.;
ret
}
pub fn GetM11(&self) -> f32 { self._11 }
pub fn GetM12(&self) -> f32 { self._12 }
pub fn GetM21(&self) -> f32 { self._21 }
pub fn GetM22(&self) -> f32 { self._22 }
pub fn GetDx(&self) -> f32 { self._41 }
pub fn GetDy(&self) -> f32 { self._42 }
pub fn SetM11(&mut self, r: f32) { self._11 = r}
pub fn SetM12(&mut self, r: f32) { self._12 = r}
pub fn SetM21(&mut self, r: f32) { self._21 = r}
pub fn SetM22(&mut self, r: f32) { self._22 = r}
pub fn SetDx(&mut self, dx: f32) { self._41 = dx }
pub fn SetDy(&mut self, dy: f32) { self._42 = dy }
}

View file

@ -0,0 +1,12 @@
crossing goto
./MultiSpaceRectF.inl:70:5: error: call to implicitly-deleted default constructor of 'union (anonymous union at ./MultiSpaceRectF.inl:138:5)'
Rust conversion
---------------
CEdge is a singly linked list
Future
------
When flattening curves, if we try to flatten at integer values
we can avoid the ComplexSpan code path.

View file

@ -0,0 +1,53 @@
use std::{marker::PhantomData, ops::Deref};
pub struct Ref<'a, T> {
ptr: *const T,
_phantom: PhantomData<&'a T>
}
impl<'a, T> Copy for Ref<'a, T> { }
impl<'a, T> Clone for Ref<'a, T> {
fn clone(&self) -> Self {
*self
}
}
impl<'a, T> Ref<'a, T> {
pub fn new(p: &'a T) -> Self {
Ref { ptr: p as *const T, _phantom: PhantomData}
}
pub unsafe fn null() -> Self {
Ref { ptr: std::ptr::null(), _phantom: PhantomData}
}
pub fn is_null(&self) -> bool {
self.ptr.is_null()
}
pub fn get_ref(self) -> &'a T {
unsafe { &*self.ptr }
}
}
impl<'a, T> PartialEq for Ref<'a, T> {
fn eq(&self, other: &Self) -> bool {
self.ptr == other.ptr && self._phantom == other._phantom
}
}
impl<'a, T> PartialOrd for Ref<'a, T> {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
match self.ptr.partial_cmp(&other.ptr) {
Some(core::cmp::Ordering::Equal) => {}
ord => return ord,
}
self._phantom.partial_cmp(&other._phantom)
}
}
impl<'a, T> Deref for Ref<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
unsafe { &*self.ptr }
}
}

View file

@ -0,0 +1,163 @@
pub mod CFloatFPU {
// Maximum allowed argument for SmallRound
// const sc_uSmallMax: u32 = 0xFFFFF;
// Binary representation of static_cast<float>(sc_uSmallMax)
const sc_uBinaryFloatSmallMax: u32 = 0x497ffff0;
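// Sanity check (illustrative): (0xFFFFF as f32).to_bits() == 0x497FFFF0, so
// Round() below can compare the bit pattern of |x| against
// sc_uBinaryFloatSmallMax to decide whether the restricted SmallRound path
// is safe to take.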
fn LargeRound(x: f32) -> i32 {
//XXX: the SSE2 version is probably slower than a naive SSE4 implementation that can use roundss
#[cfg(target_feature = "sse2")]
unsafe {
#[cfg(target_arch = "x86")]
use std::arch::x86::{__m128, _mm_set_ss, _mm_cvtss_si32, _mm_cvtsi32_ss, _mm_sub_ss, _mm_cmple_ss, _mm_store_ss, _mm_setzero_ps};
#[cfg(target_arch = "x86_64")]
use std::arch::x86_64::{__m128, _mm_set_ss, _mm_cvtss_si32, _mm_cvtsi32_ss, _mm_sub_ss, _mm_cmple_ss, _mm_store_ss, _mm_setzero_ps};
let given: __m128 = _mm_set_ss(x); // load given value
let result = _mm_cvtss_si32(given); // convert it to integer (rounding mode doesn't matter)
let rounded: __m128 = _mm_setzero_ps();
let rounded = _mm_cvtsi32_ss(rounded, result); // convert back to float
let diff = _mm_sub_ss(rounded, given); // diff = (rounded - given)
let negHalf = _mm_set_ss(-0.5); // load -0.5f
let mask = _mm_cmple_ss(diff, negHalf); // get all-ones if (rounded - given) < -0.5f
let mut correction: i32 = 0;
_mm_store_ss((&mut correction) as *mut _ as *mut _, mask); // get comparison result as integer
return result - correction; // correct the result of rounding
}
#[cfg(not(target_feature = "sse2"))]
return (x + 0.5).floor() as i32;
}
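// Spot checks (worked by hand, illustrative): _mm_cvtss_si32 rounds to nearest
// even, so 2.5 converts to 2; diff = 2 - 2.5 = -0.5 triggers the correction and
// the result becomes 3. For 3.5 the conversion already yields 4 and no
// correction applies, so half-integers consistently round up.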
//+------------------------------------------------------------------------
//
// Function: CFloatFPU::SmallRound
//
// Synopsis: Convert given floating point value to nearest integer.
// Half-integers are rounded up.
//
// Important: this routine is fast but restricted:
// given x should be within (-(0x100000-.5) < x < (0x100000-.5))
//
// Details: The implementation looks unusual and has confused
// many people. However, it indeed works, having been tested
// thoroughly on x86 and ia64 platforms for literally
// every possible argument value in the given range.
//
// More details:
// Implementation is based on the knowledge of floating point
// value representation. This 32-bits value consists of three parts:
// v & 0x80000000 = sign
// v & 0x7F800000 = exponent
// v & 0x007FFFFF - mantissa
//
// Let N be a floating point number within -0x400000 <= N <= 0x3FFFFF.
// The sum (S = 0xC00000 + N) thus will satisfy 0x800000 <= S <= 0xFFFFFF.
// All the numbers within this range (sometimes referred to as a "binade")
// have the same position of the most significant bit, i.e. 0x800000.
// Therefore they are all normalized the same way, so the weights
// of the mantissa bits are the same as those of integer numbers.
// In other words, to get the integer value of a floating point S,
// when 0x800000 <= S <= 0xFFFFFF, we can just throw away the exponent
// and sign, and add the assumed most significant bit (which is always 1
// and therefore is not stored in the floating point value):
// (int)S = (<float S as int> & 0x7FFFFF | 0x800000);
// To get the given N back as an integer, we need to subtract the
// value 0xC00000 that was added in order to obtain proper normalization:
// N = (<float S as int> & 0x7FFFFF | 0x800000) - 0xC00000.
// or
// N = (<float S as int> & 0x7FFFFF ) - 0x400000.
//
// Hopefully, the text above explains how the
// following routine works:
// int SmallRound1(float x)
// {
// union
// {
// __int32 i;
// float f;
// } u;
//
// u.f = x + float(0x00C00000);
// return ((u.i - (int)0x00400000) << 9) >> 9;
// }
// Unfortunately it is imperfect, due to the way the FPU
// rounds intermediate calculation results.
// By default, rounding mode is set to "nearest".
// This means that when it calculates N+float(0x00C00000),
// the 80-bit precise result will not fit in 32-bit float,
// so some least significant bits will be thrown away.
// Rounding to nearest means that S consisting of intS + fraction,
// where 0 <= fraction < 1, will be converted to intS
// when fraction < 0.5 and to intS+1 if fraction > 0.5.
// What would happen with fraction exactly equal to 0.5?
// Smart thing: S will go to intS if intS is even and
// to intS+1 if intS is odd. In other words, half-integers
// are rounded to nearest even number.
// This FPU feature apparently is useful to minimize
// average rounding error when somebody is, say,
// digitally simulating electrons' behavior in plasma.
// However for graphics this is not desired.
//
// We want to move half-integers up, therefore
// define SmallRound(x) as {return SmallRound1(x*2+.5) >> 1;}.
// This may require more comments.
// Let given x = i+f, where i is integer and f is fraction, 0 <= f < 1.
// Let's see what y = x*2+.5 is:
// y = i*2 + (f*2 + .5) = i*2 + g, where g = f*2 + .5;
// If "f" is in the range 0 <= f < .5 (so correct rounding result should be "i"),
// then range for "g" is .5 <= g < 1.5. The very first value, .5 will force
// SmallRound1 result to be "i*2", due to round-to-even rule; the remaining
// will lead to "i*2+1". Consequent shift will throw away extra "1" and give
// us desired "i".
// When "f" in in the range .5 <= f < 1, then 1.5 <= g < 2.5.
// All these values will round to 2, so SmallRound1 will return (2*i+2),
// and the final shift will give desired 1+1.
//
// To get the final form of the routine we need to transform the combined
// expression for u.f:
// (x*2) + .5 + float(0x00C00000) ==
// (x + (.25 + double(0x00600000)) )*2
// Note that the factor "2" means nothing for the following operations,
// since it affects only exponent bits that are ignored anyway.
// So we can save some processor cycles by avoiding this multiplication.
//
// And, the very final beautification:
// to avoid subtracting 0x00400000 let's ignore this bit.
// This means that we effectively decrease the available range by 1 bit,
// but we're chasing performance and find it acceptable.
// So
// return ((u.i - (int)0x00400000) << 9) >> 9;
// is converted to
// return ((u.i ) << 10) >> 10;
// Eventually we find that the final shift by 10 bits may be combined
// with the shift by 1 in the definition {return SmallRound1(x*2+.5) >> 1;},
// so we'll just shift by 11 bits. That's it.
//
//-------------------------------------------------------------------------
fn SmallRound(x: f32) -> i32
{
//AssertPrecisionAndRoundingMode();
debug_assert!(-(0x100000 as f64 -0.5) < x as f64 && (x as f64) < (0x100000 as f64 -0.5));
let fi = (x as f64 + (0x00600000 as f64 + 0.25)) as f32;
let result = ((fi.to_bits() as i32) << 10) >> 11;
debug_assert!(x < (result as f32) + 0.5 && x >= (result as f32) - 0.5);
return result;
}
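// Illustrative spot checks (worked by hand): SmallRound(2.5) == 3 and
// SmallRound(-2.5) == -2, i.e. half-integers are rounded up, matching the
// synopsis above and the LargeRound correction.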
pub fn Round(x: f32) -> i32
{
// cut off sign
let xAbs: u32 = x.to_bits() & 0x7FFFFFFF;
return if xAbs <= sc_uBinaryFloatSmallMax {SmallRound(x)} else {LargeRound(x)};
}
}
macro_rules! TOREAL { ($e: expr) => { $e as REAL } }

View file

@ -0,0 +1,190 @@
/* The rasterization code here is based off of piglit/tests/general/triangle-rasterization.cpp:
/**************************************************************************
*
* Copyright 2012 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
*/
use std::ops::Index;
use crate::OutputVertex;
#[derive(Debug)]
struct Vertex {
x: f32,
y: f32,
coverage: f32
}
#[derive(Debug)]
struct Triangle {
v: [Vertex; 3],
}
impl Index<usize> for Triangle {
type Output = Vertex;
fn index(&self, index: usize) -> &Self::Output {
&self.v[index]
}
}
// D3D11 mandates 8 bit subpixel precision:
// https://microsoft.github.io/DirectX-Specs/d3d/archive/D3D11_3_FunctionalSpec.htm#CoordinateSnapping
const FIXED_SHIFT: i32 = 8;
const FIXED_ONE: f32 = (1 << FIXED_SHIFT) as f32;
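// For example (illustrative): with FIXED_SHIFT = 8, a vertex x coordinate of 1.5
// becomes iround(FIXED_ONE * (1.5 - 0.5)) = 256 fixed-point units once
// rast_triangle applies its -0.5 center offset below.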
/* Proper rounding of float to integer */
fn iround(mut v: f32) -> i64 {
if v > 0.0 {
v += 0.5;
}
if v < 0.0 {
v -= 0.5;
}
return v as i64
}
/* Based on http://devmaster.net/forums/topic/1145-advanced-rasterization */
fn rast_triangle(buffer: &mut [u8], width: usize, height: usize, tri: &Triangle) {
let center_offset = -0.5;
let mut coverage1 = tri[0].coverage;
let mut coverage2 = tri[1].coverage;
let mut coverage3 = tri[2].coverage;
/* fixed point coordinates */
let mut x1 = iround(FIXED_ONE * (tri[0].x + center_offset));
let x2 = iround(FIXED_ONE * (tri[1].x + center_offset));
let mut x3 = iround(FIXED_ONE * (tri[2].x + center_offset));
let mut y1 = iround(FIXED_ONE * (tri[0].y + center_offset));
let y2 = iround(FIXED_ONE * (tri[1].y + center_offset));
let mut y3 = iround(FIXED_ONE * (tri[2].y + center_offset));
/* Force correct vertex order */
let cross = (x2 - x1) * (y3 - y2) - (y2 - y1) * (x3 - x2);
if cross > 0 {
std::mem::swap(&mut x1, &mut x3);
std::mem::swap(&mut y1, &mut y3);
// I don't understand why coverage 2 and 3 are swapped instead of 1 and 3
std::mem::swap(&mut coverage2, &mut coverage3);
} else {
std::mem::swap(&mut coverage1, &mut coverage3);
}
/* Deltas */
let dx12 = x1 - x2;
let dx23 = x2 - x3;
let dx31 = x3 - x1;
let dy12 = y1 - y2;
let dy23 = y2 - y3;
let dy31 = y3 - y1;
/* Fixed-point deltas */
let fdx12 = dx12 << FIXED_SHIFT;
let fdx23 = dx23 << FIXED_SHIFT;
let fdx31 = dx31 << FIXED_SHIFT;
let fdy12 = dy12 << FIXED_SHIFT;
let fdy23 = dy23 << FIXED_SHIFT;
let fdy31 = dy31 << FIXED_SHIFT;
/* Bounding rectangle */
let mut minx = x1.min(x2).min(x3) >> FIXED_SHIFT;
let mut maxx = x1.max(x2).max(x3) >> FIXED_SHIFT;
let mut miny = y1.min(y2).min(y3) >> FIXED_SHIFT;
let mut maxy = y1.max(y2).max(y3) >> FIXED_SHIFT;
minx = minx.max(0);
maxx = maxx.min(width as i64 - 1);
miny = miny.max(0);
maxy = maxy.min(height as i64 - 1);
/* Half-edge constants */
let mut c1 = dy12 * x1 - dx12 * y1;
let mut c2 = dy23 * x2 - dx23 * y2;
let mut c3 = dy31 * x3 - dx31 * y3;
/* Correct for top-left filling convention */
if dy12 < 0 || (dy12 == 0 && dx12 < 0) { c1 += 1 }
if dy23 < 0 || (dy23 == 0 && dx23 < 0) { c2 += 1 }
if dy31 < 0 || (dy31 == 0 && dx31 < 0) { c3 += 1 }
let mut cy1 = c1 + dx12 * (miny << FIXED_SHIFT) - dy12 * (minx << FIXED_SHIFT);
let mut cy2 = c2 + dx23 * (miny << FIXED_SHIFT) - dy23 * (minx << FIXED_SHIFT);
let mut cy3 = c3 + dx31 * (miny << FIXED_SHIFT) - dy31 * (minx << FIXED_SHIFT);
//dbg!(minx, maxx, tri, cross);
/* Perform rasterization */
let mut buffer = &mut buffer[miny as usize * width..];
for _y in miny..=maxy {
let mut cx1 = cy1;
let mut cx2 = cy2;
let mut cx3 = cy3;
for x in minx..=maxx {
if cx1 > 0 && cx2 > 0 && cx3 > 0 {
// cross is equal to 2*area of the triangle.
// we can normalize cx by 2*area to get barycentric coords.
let area = cross.abs() as f32;
let bary = (cx1 as f32 / area, cx2 as f32 / area, cx3 as f32 / area);
let coverages = coverage1 * bary.0 + coverage2 * bary.1 + coverage3 * bary.2;
let color = (coverages * 255. + 0.5) as u8;
buffer[x as usize] = color;
}
cx1 -= fdy12;
cx2 -= fdy23;
cx3 -= fdy31;
}
cy1 += fdx12;
cy2 += fdx23;
cy3 += fdx31;
buffer = &mut buffer[width..];
}
}
pub fn rasterize_to_mask(vertices: &[OutputVertex], width: u32, height: u32) -> Box<[u8]> {
let mut mask = vec![0; (width * height) as usize];
for n in (0..vertices.len()).step_by(3) {
let tri =
[&vertices[n], &vertices[n+1], &vertices[n+2]];
let tri = Triangle { v: [
Vertex { x: tri[0].x, y: tri[0].y, coverage: tri[0].coverage},
Vertex { x: tri[1].x, y: tri[1].y, coverage: tri[1].coverage},
Vertex { x: tri[2].x, y: tri[2].y, coverage: tri[2].coverage}
]
};
rast_triangle(&mut mask, width as usize, height as usize, &tri);
}
mask.into_boxed_slice()
}

View file

@ -0,0 +1,201 @@
pub(crate) type LONG = i32;
pub(crate) type INT = i32;
pub(crate) type UINT = u32;
pub(crate) type ULONG = u32;
pub(crate) type DWORD = ULONG;
pub(crate) type WORD = u16;
pub(crate) type LONGLONG = i64;
pub(crate) type ULONGLONG = u64;
pub(crate) type BYTE = u8;
pub(crate) type FLOAT = f32;
pub(crate) type REAL = FLOAT;
pub(crate) type HRESULT = LONG;
pub(crate) const S_OK: HRESULT = 0;
pub(crate) const INTSAFE_E_ARITHMETIC_OVERFLOW: HRESULT = 0x80070216;
pub(crate) const WGXERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
pub(crate) const WINCODEC_ERR_VALUEOVERFLOW: HRESULT = INTSAFE_E_ARITHMETIC_OVERFLOW;
const fn MAKE_HRESULT(sev: LONG,fac: LONG,code: LONG) -> HRESULT {
( (((sev)<<31) | ((fac)<<16) | ((code))) )
}
const FACILITY_WGX: LONG = 0x898;
const fn MAKE_WGXHR( sev: LONG, code: LONG) -> HRESULT {
MAKE_HRESULT( sev, FACILITY_WGX, (code) )
}
const fn MAKE_WGXHR_ERR( code: LONG ) -> HRESULT
{
MAKE_WGXHR( 1, code )
}
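// Worked example (illustrative): MAKE_WGXHR_ERR(0x00A) sets the severity bit,
// facility 0x898 and code 0x00A, producing the bit pattern 0x8898000A
// (WGXERR_BADNUMBER below).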
pub const WGXHR_CLIPPEDTOEMPTY: HRESULT = MAKE_WGXHR(0, 1);
pub const WGXHR_EMPTYFILL: HRESULT = MAKE_WGXHR(0, 2);
pub const WGXHR_INTERNALTEMPORARYSUCCESS: HRESULT = MAKE_WGXHR(0, 3);
pub const WGXHR_RESETSHAREDHANDLEMANAGER: HRESULT = MAKE_WGXHR(0, 4);
pub const WGXERR_BADNUMBER: HRESULT = MAKE_WGXHR_ERR(0x00A); // 4438
pub fn FAILED(hr: HRESULT) -> bool {
hr != S_OK
}
pub trait NullPtr {
fn make() -> Self;
}
impl<T> NullPtr for *mut T {
fn make() -> Self {
std::ptr::null_mut()
}
}
impl<T> NullPtr for *const T {
fn make() -> Self {
std::ptr::null()
}
}
pub fn NULL<T: NullPtr>() -> T {
T::make()
}
#[derive(Default, Clone)]
pub struct RECT {
pub left: LONG,
pub top: LONG,
pub right: LONG,
pub bottom: LONG,
}
#[derive(Default, Clone, Copy, PartialEq, Eq)]
pub struct POINT {
pub x: LONG,
pub y: LONG
}
#[derive(Clone, Copy)]
pub struct MilPoint2F
{
pub X: FLOAT,
pub Y: FLOAT,
}
#[derive(Default, Clone)]
pub struct MilPointAndSizeL
{
pub X: INT,
pub Y: INT,
pub Width: INT,
pub Height: INT,
}
pub type CMILSurfaceRect = RECT;
#[derive(PartialEq)]
pub enum MilAntiAliasMode {
None = 0,
EightByEight = 1,
}
#[derive(PartialEq, Clone, Copy)]
pub enum MilFillMode {
Alternate = 0,
Winding = 1,
}
pub const PathPointTypeStart: u8 = 0; // move, 1 point
pub const PathPointTypeLine: u8 = 1; // line, 1 point
pub const PathPointTypeBezier: u8 = 3; // default Bezier (= cubic Bezier), 3 points
pub const PathPointTypePathTypeMask: u8 = 0x07; // type mask (lowest 3 bits).
pub const PathPointTypeCloseSubpath: u8 = 0x80; // closed flag
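// Example encoding (illustrative): a closed rectangle is the type array
// [PathPointTypeStart, PathPointTypeLine, PathPointTypeLine,
//  PathPointTypeLine | PathPointTypeCloseSubpath]
// with one POINT per Start/Line entry and three POINTs per Bezier entry,
// supplied in 28.4 fixed point; see rasterize_to_tri_list for how these
// arrays are consumed.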
use std::cell::RefCell;
use crate::{hwvertexbuffer::CHwVertexBuffer, OutputVertex};
pub type DynArray<T> = Vec<T>;
pub trait DynArrayExts<T> {
fn Reset(&mut self, shrink: bool);
fn GetCount(&self) -> usize;
fn SetCount(&mut self, count: usize);
fn GetDataBuffer(&self) -> &[T];
}
impl<T> DynArrayExts<T> for DynArray<T> {
fn Reset(&mut self, shrink: bool) {
self.clear();
if shrink {
self.shrink_to_fit();
}
}
fn GetCount(&self) -> usize {
self.len()
}
fn SetCount(&mut self, count: usize) {
assert!(count <= self.len());
self.truncate(count);
}
fn GetDataBuffer(&self) -> &[T] {
self
}
}
#[derive(Default)]
pub struct CD3DDeviceLevel1 {
pub clipRect: MilPointAndSizeL,
pub output: RefCell<Vec<OutputVertex>>
}
impl CD3DDeviceLevel1 {
pub fn new() -> Self { Default::default() }
pub fn GetClipRect(&self, rect: &mut MilPointAndSizeL) {
*rect = self.clipRect.clone();
}
pub fn GetViewport(&self) -> MilPointAndSizeL { self.clipRect.clone() }
pub fn GetVB_XYZDUV2(&self) -> Box<CHwVertexBuffer> {
Box::new(Default::default())
}
}
pub struct CHwPipelineBuilder;
pub mod CoordinateSpace {
#[derive(Default, Clone)]
pub struct Shape;
#[derive(Default, Clone)]
pub struct Device;
}
pub trait IShapeData {
fn GetFillMode(&self) -> MilFillMode;
}
pub type MilVertexFormat = DWORD;
pub enum MilVertexFormatAttribute {
MILVFAttrNone = 0x0,
MILVFAttrXY = 0x1,
MILVFAttrZ = 0x2,
MILVFAttrXYZ = 0x3,
MILVFAttrNormal = 0x4,
MILVFAttrDiffuse = 0x8,
MILVFAttrSpecular = 0x10,
MILVFAttrUV1 = 0x100,
MILVFAttrUV2 = 0x300,
MILVFAttrUV3 = 0x700,
MILVFAttrUV4 = 0xf00,
MILVFAttrUV5 = 0x1f00,
MILVFAttrUV6 = 0x3f00,
MILVFAttrUV7 = 0x7f00,
MILVFAttrUV8 = 0xff00, // Vertex fields that are pre-generated
}
pub struct CHwPipeline;
pub struct CBufferDispenser;
#[derive(Default)]
pub struct PointXYA
{
pub x: f32,pub y: f32, pub a: f32,
}

View file

@ -99,6 +99,8 @@ localization-ffi = { path = "../../../../intl/l10n/rust/localization-ffi" }
processtools = { path = "../../../components/processtools" }
qcms = { path = "../../../../gfx/qcms", features = ["c_bindings", "neon"], default-features = false }
wpf-gpu-raster = { git = "https://github.com/FirefoxGraphics/wpf-gpu-raster", rev = "f0d95ce14af8a8de74f469dbad715c4064fca2e1" }
# Force url to stay at 2.1.0. See bug 1734538.
url = "=2.1.0"

View file

@ -71,6 +71,7 @@ extern crate neqo_glue;
extern crate wgpu_bindings;
extern crate qcms;
extern crate wpf_gpu_raster;
extern crate unic_langid;
extern crate unic_langid_ffi;