diff --git a/CHANGELOG.md b/CHANGELOG.md index 3c40df86..6895f992 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## Unreleased +### Added + +- [#57](https://github.com/bobozaur/sqlx-exasol/pull/57): Complete proc-macro override + - Removes conflicts with `sqlx` in the same crate when using proc-macros + ### Fixed - [#50](https://github.com/bobozaur/sqlx-exasol/issues/50): Fix ETL `IMPORT` deadlocks diff --git a/Cargo.toml b/Cargo.toml index 32273a51..40cbad3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,10 +42,10 @@ features = [ [features] default = ["any", "macros", "migrate", "json"] -derive = ["sqlx/derive", "sqlx-exasol-macros?/derive"] -macros = ["derive", "sqlx/macros", "sqlx-exasol-macros/macros"] +derive = ["sqlx-a-orig/derive", "sqlx-exasol-macros?/derive"] +macros = ["derive", "sqlx-a-orig/macros", "sqlx-exasol-macros/macros"] migrate = [ - "sqlx/migrate", + "sqlx-a-orig/migrate", "sqlx-exasol-impl/migrate", "sqlx-exasol-macros?/migrate", ] @@ -53,47 +53,47 @@ migrate = [ sqlx-toml = ["sqlx-exasol-impl/sqlx-toml", "sqlx-exasol-macros?/sqlx-toml"] # Base runtime features without TLS -runtime-async-std = ["sqlx/runtime-async-std"] -runtime-tokio = ["sqlx/runtime-tokio"] +runtime-async-std = ["sqlx-a-orig/runtime-async-std"] +runtime-tokio = ["sqlx-a-orig/runtime-tokio"] # TLS features -tls-native-tls = ["sqlx/tls-native-tls", "sqlx-exasol-impl/native-tls"] +tls-native-tls = ["sqlx-a-orig/tls-native-tls", "sqlx-exasol-impl/native-tls"] tls-rustls-aws-lc-rs = [ - "sqlx/tls-rustls-aws-lc-rs", + "sqlx-a-orig/tls-rustls-aws-lc-rs", "sqlx-exasol-impl/rustls-aws-lc-rs", ] tls-rustls-ring-webpki = [ - "sqlx/tls-rustls-ring-webpki", + "sqlx-a-orig/tls-rustls-ring-webpki", "sqlx-exasol-impl/rustls-ring", ] tls-rustls-ring-native-roots = [ - "sqlx/tls-rustls-ring-native-roots", + "sqlx-a-orig/tls-rustls-ring-native-roots", "sqlx-exasol-impl/rustls-ring", ] # Database -any = 
["sqlx/any", "sqlx-exasol-impl/any"] +any = ["sqlx-a-orig/any", "sqlx-exasol-impl/any"] # Types bigdecimal = [ - "sqlx/bigdecimal", + "sqlx-a-orig/bigdecimal", "sqlx-exasol-impl/bigdecimal", "sqlx-exasol-macros?/bigdecimal", ] chrono = [ - "sqlx/chrono", + "sqlx-a-orig/chrono", "sqlx-exasol-impl/chrono", "sqlx-exasol-macros?/chrono", ] geo-types = ["sqlx-exasol-impl/geo-types", "sqlx-exasol-macros?/geo-types"] -json = ["sqlx/json", "sqlx-exasol-impl/json", "sqlx-exasol-macros?/json"] +json = ["sqlx-a-orig/json", "sqlx-exasol-impl/json", "sqlx-exasol-macros?/json"] rust_decimal = [ - "sqlx/rust_decimal", + "sqlx-a-orig/rust_decimal", "sqlx-exasol-impl/rust_decimal", "sqlx-exasol-macros?/rust_decimal", ] -time = ["sqlx/time", "sqlx-exasol-impl/time", "sqlx-exasol-macros?/time"] -uuid = ["sqlx/uuid", "sqlx-exasol-impl/uuid", "sqlx-exasol-macros?/uuid"] +time = ["sqlx-a-orig/time", "sqlx-exasol-impl/time", "sqlx-exasol-macros?/time"] +uuid = ["sqlx-a-orig/uuid", "sqlx-exasol-impl/uuid", "sqlx-exasol-macros?/uuid"] # Driver specific features compression = ["sqlx-exasol-impl/compression"] @@ -140,6 +140,9 @@ hyper = { version = "1", default-features = false, features = [ ] } native-tls = { version = "0.2", default-features = false } paste = { version = "1", default-features = false } +proc-macro2 = { version = "1", default-features = false, features = [ + "proc-macro", +] } quote = { version = "1", default-features = false } rand = { version = "0.8", default-features = false, features = [ "std", @@ -162,16 +165,21 @@ serde_json = { version = "1", default-features = false, features = [ "raw_value", ] } sha2 = { version = "0.10", default-features = false, features = ["std"] } -sqlx = { version = "0.9.0-alpha.1", default-features = false } sqlx-cli = { version = "0.9.0-alpha.1", default-features = false } sqlx-core = { version = "0.9.0-alpha.1", default-features = false, features = [ "offline", "migrate", ] } sqlx-macros-core = { version = "0.9.0-alpha.1", default-features 
= false } +# Purposely named like this to prevent formatting reordering. +# We also get the benefit of ensuring that our overrides work correctly since `sqlx` is not available +sqlx-a-orig = { version = "0.9.0-alpha.1", default-features = false, package = "sqlx" } syn = { version = "2", default-features = false, features = [ + "full", "parsing", + "printing", "proc-macro", + "visit-mut", ] } thiserror = { version = "2", default-features = false } time = { version = "0.3", default-features = false, features = [ @@ -195,7 +203,7 @@ wkt = { version = "0.14", default-features = false, features = [ [dependencies] sqlx-exasol-macros = { workspace = true, optional = true } sqlx-exasol-impl = { workspace = true } -sqlx = { workspace = true } +sqlx-a-orig = { workspace = true } [dev-dependencies] dotenvy = { workspace = true } @@ -225,4 +233,3 @@ unused_extern_crates = "warn" unused_import_braces = "warn" unused_lifetimes = "warn" unused_qualifications = "warn" -# Enable parsing of `sqlx.toml` for configuring macros and migrations. diff --git a/README.md b/README.md index da0eee21..6ada5608 100644 --- a/README.md +++ b/README.md @@ -25,20 +25,9 @@ it can do all the drivers shipped with `sqlx` do, with some caveats: ## Compile-time query checks -The driver now supports compile-time query validation. - -However, full functionality is implemented through path overrides and due to `sqlx` macros -implementation details you will currently need to either add `extern crate sqlx_exasol as sqlx;` -to the root of your crate or rename the crate import to `sqlx` in `Cargo.toml`: - -```toml -sqlx = { version = "*", package = "sqlx-exasol" } -``` - -This implies that the compile time query macros from both `sqlx-exasol` and `sqlx` -cannot co-exist within the same crate without collisions or unexpected surprises. - -See for more details. +The driver now supports compile-time query validation and can be used alongside +`sqlx` within the same crate. 
Note however that derive proc-macros from `sqlx` are +database agnostic and thus `sqlx-exasol` just re-exports them as-is. ## CLI utility diff --git a/sqlx-exasol-impl/Cargo.toml b/sqlx-exasol-impl/Cargo.toml index 28a290f0..c8addb63 100644 --- a/sqlx-exasol-impl/Cargo.toml +++ b/sqlx-exasol-impl/Cargo.toml @@ -81,7 +81,11 @@ uuid = { workspace = true, optional = true } wkt = { workspace = true, optional = true } [dev-dependencies] -sqlx = { workspace = true, features = ["runtime-tokio", "macros", "migrate"] } +sqlx-a-orig = { workspace = true, features = [ + "runtime-tokio", + "macros", + "migrate", +] } [lints] workspace = true diff --git a/sqlx-exasol-impl/src/lib.rs b/sqlx-exasol-impl/src/lib.rs index 40acc753..0ba64794 100644 --- a/sqlx-exasol-impl/src/lib.rs +++ b/sqlx-exasol-impl/src/lib.rs @@ -1,6 +1,9 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] //! **EXASOL** database driver. +#[cfg(test)] +extern crate sqlx_a_orig as sqlx; + #[cfg(feature = "native-tls")] use native_tls as _; #[cfg(feature = "tls")] diff --git a/sqlx-exasol-macros/Cargo.toml b/sqlx-exasol-macros/Cargo.toml index ea5d1d3d..bee2b49b 100644 --- a/sqlx-exasol-macros/Cargo.toml +++ b/sqlx-exasol-macros/Cargo.toml @@ -17,8 +17,6 @@ proc-macro = true # SQLx features derive = ["dep:sqlx-macros-core", "sqlx-macros-core?/derive"] macros = [ - "dep:syn", - "dep:quote", "dep:sqlx-macros-core", "dep:sqlx-exasol-impl", "sqlx-macros-core?/macros", @@ -72,10 +70,12 @@ uuid = [ ] [dependencies] +proc-macro2 = { workspace = true } +syn = { workspace = true } +quote = { workspace = true } + sqlx-macros-core = { workspace = true, optional = true } sqlx-exasol-impl = { workspace = true, optional = true } -syn = { workspace = true, optional = true } -quote = { workspace = true, optional = true } [lints] workspace = true diff --git a/sqlx-exasol-macros/src/lib.rs b/sqlx-exasol-macros/src/lib.rs index 025aacac..702b7986 100644 --- a/sqlx-exasol-macros/src/lib.rs +++ 
b/sqlx-exasol-macros/src/lib.rs @@ -1,12 +1,97 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] +mod parse; + +#[allow(unused_imports, reason = "built-in; conditionally compiled")] +use proc_macro::TokenStream; + #[cfg(feature = "macros")] #[proc_macro] -pub fn expand_query(input: proc_macro::TokenStream) -> proc_macro::TokenStream { +pub fn expand_query(input: TokenStream) -> TokenStream { let input = syn::parse_macro_input!(input as sqlx_macros_core::query::QueryMacroInput); match sqlx_macros_core::query::expand_input(input, &[sqlx_exasol_impl::QUERY_DRIVER]) { - Ok(ts) => ts.into(), + Ok(ts) => parse::rewrite(ts).into(), + Err(e) => { + if let Some(parse_err) = e.downcast_ref::<syn::Error>() { + parse_err.to_compile_error().into() + } else { + let msg = e.to_string(); + quote::quote!(::std::compile_error!(#msg)).into() + } + } + } +} + +#[cfg(feature = "derive")] +#[proc_macro_derive(Encode, attributes(sqlx))] +pub fn derive_encode(tokenstream: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(tokenstream as syn::DeriveInput); + match sqlx_macros_core::derives::expand_derive_encode(&input) { + Ok(ts) => parse::rewrite(ts).into(), + Err(e) => e.to_compile_error().into(), + } +} + +#[cfg(feature = "derive")] +#[proc_macro_derive(Decode, attributes(sqlx))] +pub fn derive_decode(tokenstream: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(tokenstream as syn::DeriveInput); + match sqlx_macros_core::derives::expand_derive_decode(&input) { + Ok(ts) => parse::rewrite(ts).into(), + Err(e) => e.to_compile_error().into(), + } +} + +#[cfg(feature = "derive")] +#[proc_macro_derive(Type, attributes(sqlx))] +pub fn derive_type(tokenstream: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(tokenstream as syn::DeriveInput); + match sqlx_macros_core::derives::expand_derive_type_encode_decode(&input) { + Ok(ts) => parse::rewrite(ts).into(), + Err(e) => e.to_compile_error().into(), + } +} + +#[cfg(feature = "derive")]
+#[proc_macro_derive(FromRow, attributes(sqlx))] +pub fn derive_from_row(input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as syn::DeriveInput); + + match sqlx_macros_core::derives::expand_derive_from_row(&input) { + Ok(ts) => parse::rewrite(ts).into(), + Err(e) => e.to_compile_error().into(), + } +} + +#[cfg(feature = "migrate")] +#[proc_macro] +pub fn migrate(input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as Option<syn::LitStr>); + match sqlx_macros_core::migrate::expand(input) { + // Wrap the TokenStream in a block so it can be parsed as a syn::Stmt. + // Otherwise we'd have to special case this to a syn::Expr. + // + // NOTE: Stmt::Expr variant does not normally apply here! + Ok(ts) => parse::rewrite(quote::quote!({#ts})).into(), + Err(e) => { + if let Some(parse_err) = e.downcast_ref::<syn::Error>() { + parse_err.to_compile_error().into() + } else { + let msg = e.to_string(); + quote::quote!(::std::compile_error!(#msg)).into() + } + } + } +} + +#[cfg(feature = "macros")] +#[proc_macro_attribute] +pub fn test(args: TokenStream, input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as syn::ItemFn); + + match sqlx_macros_core::test_attr::expand(args.into(), input) { + Ok(ts) => parse::rewrite(ts).into(), Err(e) => { if let Some(parse_err) = e.downcast_ref::<syn::Error>() { parse_err.to_compile_error().into() diff --git a/sqlx-exasol-macros/src/parse.rs b/sqlx-exasol-macros/src/parse.rs new file mode 100644 index 00000000..34644c31 --- /dev/null +++ b/sqlx-exasol-macros/src/parse.rs @@ -0,0 +1,57 @@ +use proc_macro2::TokenStream; +use syn::{parse::Parse, visit_mut::VisitMut, ItemUse, Path, Stmt, UseTree}; + +/// Rewrites `::sqlx::` to `::sqlx_exasol::`.
+pub fn rewrite(token_stream: TokenStream) -> TokenStream { + let mut stmts: Vec<Stmt> = match syn::parse2(token_stream) { + Ok(Stmts(stmts)) => stmts, + Err(err) => return err.to_compile_error(), + }; + + for stmt in &mut stmts { + SqlxToSqlxExasol.visit_stmt_mut(stmt); + } + + quote::quote!(#(#stmts)*) +} + +struct Stmts(Vec<Stmt>); + +impl Parse for Stmts { + fn parse(input: syn::parse::ParseStream<'_>) -> syn::Result<Self> { + let mut items = Vec::new(); + while !input.is_empty() { + items.push(input.parse()?); + } + Ok(Self(items)) + } +} + +struct SqlxToSqlxExasol; + +impl VisitMut for SqlxToSqlxExasol { + fn visit_path_mut(&mut self, path: &mut Path) { + // Recurse to also match inner paths such as generics. + syn::visit_mut::visit_path_mut(self, path); + + // Match ::sqlx::... + if path.leading_colon.is_some() { + if let Some(segment) = path.segments.first_mut() { + if segment.ident == "sqlx" { + segment.ident = syn::Ident::new("sqlx_exasol", segment.ident.span()); + } + } + } + } + + fn visit_item_use_mut(&mut self, item_use: &mut ItemUse) { + // Match ::sqlx::... + if item_use.leading_colon.is_some() { + if let UseTree::Path(path) = &mut item_use.tree { + if path.ident == "sqlx" { + path.ident = syn::Ident::new("sqlx_exasol", path.ident.span()); + } + } + } + } +} diff --git a/src/lib.md b/src/lib.md new file mode 100644 index 00000000..3c2fdbe5 --- /dev/null +++ b/src/lib.md @@ -0,0 +1,217 @@ +A database driver for Exasol to be used with the Rust [sqlx](https://github.com/launchbadge/sqlx) framework. + +Based on `sqlx` version `0.9.0-alpha.1`. + +## Features flags + +- `etl` - Add support for ETL jobs. +- `compression` - Add compression support (for both connections and ETL jobs). +- `any` - Add support for the `Any` database driver, which can proxy to a database driver at + runtime. +- `derive` - Add support for the derive family macros, those are `FromRow`, `Type`, `Encode`, + `Decode`.
+- `macros` - Add support for the `query*!` macros, which allows compile-time checked queries. +- `migrate` - Add support for the migration management and `migrate!` macro, which allow + compile-time embedded migrations. +- `uuid` - Add support for UUID. +- `chrono` - Add support for date and time types from `chrono`. +- `time` - Add support for date and time types from `time` crate (alternative to `chrono`, which + is preferred by `query!` macro, if both enabled). +- `bigdecimal` - Add support for `BigDecimal` from the `bigdecimal` crate. +- `rust_decimal` - Add support for `Decimal` from the `rust_decimal` crate. +- `geo-types` - Add support for `Geometry` and its variants from the `geo-types` crate. +- `json` - Add support for `Json` as well as `serde_json::Value` and `serde_json::RawValue`. + +## Supported types + +See the [`types`] module. + +## Comparison to native sqlx drivers + +The driver re-exports all `sqlx` public API and implements the exposed traits. As a result, +it can do all the drivers shipped with `sqlx` do, with some caveats: + +- Limitations + - separate CLI utility (`sqlx-exasol` instead of `sqlx`) + - compile time query macros cannot work along the ones from `sqlx` within the same crate + - no locking migrations support[1](#no_locks) + - no column nullability checks[2](#nullable) + +- Additions + - array-like parameter binding in queries, thanks to the columnar nature of the Exasol + database + - performant & parallelizable ETL IMPORT/EXPORT jobs in CSV format through HTTP Transport + +## Compile-time query checks + +The driver now supports compile-time query validation and can be used alongside +`sqlx` within the same crate. Note however that derive proc-macros from `sqlx` are +database agnostic and thus `sqlx-exasol` just re-exports them as-is. 
+ +## CLI utility + +The driver uses its own CLI utility (built on the same `sqlx-cli` library): + +```sh +cargo install sqlx-exasol-cli + +# Usage is exactly the same as sqlx-cli +sqlx-exasol database create +sqlx-exasol database drop +sqlx-exasol migrate add +sqlx-exasol migrate run +cargo sqlx-exasol prepare +``` + +## Connection string + +The connection string is expected to be an URL with the `exa://` scheme, e.g: +`exa://sys:exasol@localhost:8563`. + +See [`ExaConnectOptions`] for a list of supported connection string parameters. + +## HTTP Transport + +Functionality that allows performant data import/export by creation of one-shot HTTP servers +to which Exasol connects to (at most one per node), thus balancing the load. + +The data is always in `CSV` format and job configuration can be done through the +[`ImportBuilder`](etl::ImportBuilder) and [`ExportBuilder`](etl::ExportBuilder) structs. +The workers implement `AsyncWrite` and `AsyncRead` respectively, providing great flexibility in +terms of how the data is processed. + +The general flow of an ETL job is: + +- build the job through [`ImportBuilder`](etl::ImportBuilder) or + [`ExportBuilder`](etl::ExportBuilder) +- concurrently wait on the query execution future (typically from the main thread) and on worker + operations (async tasks can be spawned in multi-threaded runtimes to further parallelize the + workload). +- when all the workers are done (readers reach EOF, while writers require an explicit `close()`) + the job ends and the query execution future returns. +- an error/timeout issue results in the query execution future or a worker throwing an error, + therefore consider joining the tasks and aborting them if an error is thrown somewhere. 
+ +## Examples + +Using the driver for regular database interactions: + +```rust,no_run +use std::env; + +use sqlx_exasol::{error::*, *}; + +# async { +# +let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; +let mut con = pool.acquire().await?; + +sqlx_exasol::query("CREATE SCHEMA RUST_DOC_TEST") + .execute(&mut *con) + .await?; +# +# let res: Result<(), BoxDynError> = Ok(()); +# res +# }; +``` + +Array-like parameter binding, also featuring the [`crate::types::ExaIter`] adapter. +An important thing to note is that the parameter sets must be of equal length, +otherwise an error is thrown: + +```rust,no_run +use std::{collections::HashSet, env}; + +use sqlx_exasol::{error::*, *}; + +# async { +# +let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; +let mut con = pool.acquire().await?; + +let params1 = vec![1, 2, 3]; +let params2 = HashSet::from([1, 2, 3]); + +sqlx_exasol::query("INSERT INTO MY_TABLE VALUES (?, ?)") + .bind(¶ms1) + .bind(types::ExaIter::new(params2.iter())) + .execute(&mut *con) + .await?; +# +# let res: Result<(), BoxDynError> = Ok(()); +# res +# }; +``` + +An EXPORT - IMPORT ETL data pipe. + +```rust,no_run +# #[cfg(feature = "etl")] { +use std::env; + +use futures_util::{ + future::{try_join, try_join3, try_join_all}, + AsyncReadExt, AsyncWriteExt, TryFutureExt, +}; +use sqlx_exasol::{error::*, etl::*, *}; + +async fn pipe(mut reader: ExaExport, mut writer: ExaImport) -> Result<(), BoxDynError> { + let mut buf = vec![0; 5120].into_boxed_slice(); + let mut read = 1; + + while read > 0 { + // Readers return EOF when there's no more data. 
+ read = reader.read(&mut buf).await?; + // Write data to Exasol + writer.write_all(&buf[..read]).await?; + } + + // Writes, unlike readers, MUST be closed to signal we won't send more data to Exasol + writer.close().await?; + Ok(()) +} + +# async { +# +let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; +let mut con1 = pool.acquire().await?; +let mut con2 = pool.acquire().await?; + +// Build EXPORT job +let (export_fut, readers) = ExportBuilder::new_from_table("TEST_ETL", None) + .build(&mut con1) + .await?; + +// Build IMPORT job +let (import_fut, writers) = ImportBuilder::new("TEST_ETL", None) + .build(&mut con2) + .await?; + +// Use readers and writers in some futures +let transport_futs = std::iter::zip(readers, writers).map(|(r, w)| pipe(r, w)); + +// Execute the EXPORT and IMPORT query futures along with the worker futures +let (export_res, import_res, _) = try_join3( + export_fut.map_err(From::from), + import_fut.map_err(From::from), + try_join_all(transport_futs), +) +.await?; + +assert_eq!(export_res.rows_affected(), import_res.rows_affected()); +# +# let res: Result<(), BoxDynError> = Ok(()); +# res +# }}; +``` + +## Footnotes + +1: Exasol has no advisory or database locks and simple, unnested, +transactions are unfortunately not enough to define a mechanism so that concurrent migrations do +not collide. This does **not** pose a problem when migrations are run sequentially or do not act +on the same database objects. + +2: Exasol does not provide the information of whether a column is +nullable or not, so the driver cannot implicitly decide whether a `NULL` value can go into a +certain database column or not until it actually tries. diff --git a/src/lib.rs b/src/lib.rs index 28942677..d4be6c38 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,329 +1,68 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] -//! A database driver for Exasol to be used with the Rust [sqlx](https://github.com/launchbadge/sqlx) framework. -//! -//! 
Based on `sqlx` version `0.9.0-alpha.1`. -//! -//! ## Features flags -//! -//! - `etl` - Add support for ETL jobs. -//! - `compression` - Add compression support (for both connections and ETL jobs). -//! - `any` - Add support for the `Any` database driver, which can proxy to a database driver at -//! runtime. -//! - `derive` - Add support for the derive family macros, those are `FromRow`, `Type`, `Encode`, -//! `Decode`. -//! - `macros` - Add support for the `query*!` macros, which allows compile-time checked queries. -//! - `migrate` - Add support for the migration management and `migrate!` macro, which allow -//! compile-time embedded migrations. -//! - `uuid` - Add support for UUID. -//! - `chrono` - Add support for date and time types from `chrono`. -//! - `time` - Add support for date and time types from `time` crate (alternative to `chrono`, which -//! is preferred by `query!` macro, if both enabled). -//! - `bigdecimal` - Add support for `BigDecimal` from the `bigdecimal` crate. -//! - `rust_decimal` - Add support for `Decimal` from the `rust_decimal` crate. -//! - `geo-types` - Add support for `Geometry` and its variants from the `geo-types` crate. -//! - `json` - Add support for `Json` as well as `serde_json::Value` and `serde_json::RawValue`. -//! -//! ## Supported types -//! -//! See the [`types`] module. -//! -//! ## Comparison to native sqlx drivers -//! -//! The driver re-exports all `sqlx` public API and implements the exposed traits. As a result, -//! it can do all the drivers shipped with `sqlx` do, with some caveats: -//! -//! - Limitations -//! - separate CLI utility (`sqlx-exasol` instead of `sqlx`) -//! - compile time query macros cannot work along the ones from `sqlx` within the same crate -//! - no locking migrations support[1](#no_locks) -//! - no column nullability checks[2](#nullable) -//! -//! - Additions -//! - array-like parameter binding in queries, thanks to the columnar nature of the Exasol -//! database -//! 
- performant & parallelizable ETL IMPORT/EXPORT jobs in CSV format through HTTP Transport -//! -//! ## Compile-time query checks -//! -//! The driver now supports compile-time query validation. -//! -//! However, full functionality is implemented through path overrides and due to `sqlx` macros -//! implementation details you will currently need to either add `extern crate sqlx_exasol as sqlx;` -//! to the root of your crate or rename the crate import to `sqlx` in `Cargo.toml`: -//! -//! ```toml -//! sqlx = { version = "*", package = "sqlx-exasol" } -//! ``` -//! -//! This implies that the compile time query macros from both `sqlx-exasol` and `sqlx` -//! cannot co-exist within the same crate without collisions or unexpected surprises. -//! -//! See for more details. -//! -//! ## CLI utility -//! -//! The driver uses its own CLI utility (built on the same `sqlx-cli` library): -//! ```sh -//! cargo install sqlx-exasol-cli -//! -//! # Usage is exactly the same as sqlx-cli -//! sqlx-exasol database create -//! sqlx-exasol database drop -//! sqlx-exasol migrate add -//! sqlx-exasol migrate run -//! cargo sqlx-exasol prepare -//! ``` -//! -//! ## Connection string -//! -//! The connection string is expected to be an URL with the `exa://` scheme, e.g: -//! `exa://sys:exasol@localhost:8563`. -//! -//! See [`ExaConnectOptions`] for a list of supported connection string parameters. -//! -//! ## HTTP Transport -//! Functionality that allows performant data import/export by creation of one-shot HTTP servers -//! to which Exasol connects to (at most one per node), thus balancing the load. -//! -//! The data is always in `CSV` format and job configuration can be done through the -//! [`ImportBuilder`](etl::ImportBuilder) and [`ExportBuilder`](etl::ExportBuilder) structs. -//! The workers implement `AsyncWrite` and `AsyncRead` respectively, providing great flexibility in -//! terms of how the data is processed. -//! -//! The general flow of an ETL job is: -//! 
- build the job through [`ImportBuilder`](etl::ImportBuilder) or -//! [`ExportBuilder`](etl::ExportBuilder) -//! - concurrently wait on the query execution future (typically from the main thread) and on worker -//! operations (async tasks can be spawned in multi-threaded runtimes to further parallelize the -//! workload). -//! - when all the workers are done (readers reach EOF, while writers require an explicit `close()`) -//! the job ends and the query execution future returns. -//! - an error/timeout issue results in the query execution future or a worker throwing an error, -//! therefore consider joining the tasks and aborting them if an error is thrown somewhere. -//! -//! ## Examples -//! Using the driver for regular database interactions: -//! ```rust,no_run -//! use std::env; -//! -//! use sqlx_exasol::{error::*, *}; -//! -//! # async { -//! # -//! let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; -//! let mut con = pool.acquire().await?; -//! -//! sqlx_exasol::query("CREATE SCHEMA RUST_DOC_TEST") -//! .execute(&mut *con) -//! .await?; -//! # -//! # let res: Result<(), BoxDynError> = Ok(()); -//! # res -//! # }; -//! ``` -//! -//! Array-like parameter binding, also featuring the [`crate::types::ExaIter`] adapter. -//! An important thing to note is that the parameter sets must be of equal length, -//! otherwise an error is thrown: -//! ```rust,no_run -//! use std::{collections::HashSet, env}; -//! -//! use sqlx_exasol::{error::*, *}; -//! -//! # async { -//! # -//! let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; -//! let mut con = pool.acquire().await?; -//! -//! let params1 = vec![1, 2, 3]; -//! let params2 = HashSet::from([1, 2, 3]); -//! -//! sqlx_exasol::query("INSERT INTO MY_TABLE VALUES (?, ?)") -//! .bind(¶ms1) -//! .bind(types::ExaIter::new(params2.iter())) -//! .execute(&mut *con) -//! .await?; -//! # -//! # let res: Result<(), BoxDynError> = Ok(()); -//! # res -//! # }; -//! ``` -//! -//! 
An EXPORT - IMPORT ETL data pipe. -//! ```rust,no_run -//! # #[cfg(feature = "etl")] { -//! use std::env; -//! -//! use futures_util::{ -//! future::{try_join, try_join3, try_join_all}, -//! AsyncReadExt, AsyncWriteExt, TryFutureExt, -//! }; -//! use sqlx_exasol::{error::*, etl::*, *}; -//! -//! async fn pipe(mut reader: ExaExport, mut writer: ExaImport) -> Result<(), BoxDynError> { -//! let mut buf = vec![0; 5120].into_boxed_slice(); -//! let mut read = 1; -//! -//! while read > 0 { -//! // Readers return EOF when there's no more data. -//! read = reader.read(&mut buf).await?; -//! // Write data to Exasol -//! writer.write_all(&buf[..read]).await?; -//! } -//! -//! // Writes, unlike readers, MUST be closed to signal we won't send more data to Exasol -//! writer.close().await?; -//! Ok(()) -//! } -//! -//! # async { -//! # -//! let pool = ExaPool::connect(&env::var("DATABASE_URL").unwrap()).await?; -//! let mut con1 = pool.acquire().await?; -//! let mut con2 = pool.acquire().await?; -//! -//! // Build EXPORT job -//! let (export_fut, readers) = ExportBuilder::new_from_table("TEST_ETL", None) -//! .build(&mut con1) -//! .await?; -//! -//! // Build IMPORT job -//! let (import_fut, writers) = ImportBuilder::new("TEST_ETL", None) -//! .build(&mut con2) -//! .await?; -//! -//! // Use readers and writers in some futures -//! let transport_futs = std::iter::zip(readers, writers).map(|(r, w)| pipe(r, w)); -//! -//! // Execute the EXPORT and IMPORT query futures along with the worker futures -//! let (export_res, import_res, _) = try_join3( -//! export_fut.map_err(From::from), -//! import_fut.map_err(From::from), -//! try_join_all(transport_futs), -//! ) -//! .await?; -//! -//! assert_eq!(export_res.rows_affected(), import_res.rows_affected()); -//! # -//! # let res: Result<(), BoxDynError> = Ok(()); -//! # res -//! # }}; -//! ``` -//! -//! ## Footnotes -//! 1: Exasol has no advisory or database locks and simple, unnested, -//! 
transactions are unfortunately not enough to define a mechanism so that concurrent migrations do -//! not collide. This does **not** pose a problem when migrations are run sequentially or do not act -//! on the same database objects. -//! -//! 2: Exasol does not provide the information of whether a column is -//! nullable or not, so the driver cannot implicitly decide whether a `NULL` value can go into a -//! certain database column or not until it actually tries. +#![doc = include_str!("lib.md")] -pub use sqlx::*; +pub use sqlx_a_orig::*; pub use sqlx_exasol_impl::*; -/// # Supported types -/// -/// | Rust type | Exasol type | -/// | :------------------------ | :-------------------------------------------- | -/// | `bool` | `BOOLEAN` | -/// | `i8`, `i16`, `i32`, `i64` | `DECIMAL` | -/// | `f64` | `DOUBLE` | -/// | `String`, `&str` | `CHAR(n) ASCII/UTF8`, `VARCHAR(n) ASCII/UTF8` | -/// | `ExaIntervalYearToMonth` | `INTERVAL YEAR TO MONTH` | -/// | `HashType` | `HASHTYPE` | -/// | `Option` | `T` (for any `T` that implements `Type`) | -/// -/// ## `chrono` feature -/// -/// | Rust type | Exasol type | -/// | :---------------------- | :----------------------- | -/// | `chrono::NaiveDate` | `DATE` | -/// | `chrono::NaiveDateTime` | `TIMESTAMP` | -/// | `chrono::TimeDelta` | `INTERVAL DAY TO SECOND` | -/// -/// ## `time` feature -/// -/// | Rust type | Exasol type | -/// | :------------------------ | :----------------------- | -/// | `time::Date` | `DATE` | -/// | `time::PrimitiveDateTime` | `TIMESTAMP` | -/// | `time::Duration` | `INTERVAL DAY TO SECOND` | -/// -/// ## `rust_decimal` feature -/// -/// | Rust type | Exasol type | -/// | :---------------------- | :------------- | -/// | `rust_decimal::Decimal` | `DECIMAL(p,s)` | -/// -/// ## `bigdecimal` feature -/// -/// | Rust type | Exasol type | -/// | :----------------------- | :------------- | -/// | `bigdecimal::BigDecimal` | `DECIMAL(p,s)` | -/// -/// ## `uuid` feature -/// -/// | Rust type | Exasol type | -/// | 
:----------- | :---------- | -/// | `uuid::Uuid` | `HASHTYPE` | -/// -/// ## `geo-types` feature -/// -/// | Rust type | Exasol type | -/// | :-------------------- | :---------- | -/// | `geo_types::Geometry` | `GEOMETRY` | -/// -/// **Note:** due to a [bug in the Exasol websocket -/// API](httpsf://github.com/exasol/websocket-api/issues/39), `GEOMETRY` can't be used as prepared -/// statement bind parameters. It can, however, be used as a column in a returned result set or with -/// runtime checked queries. -/// -/// ## `json` feature -/// -/// The `json` feature enables `Encode` and `Decode` implementations for `Json`, -/// `serde_json::Value` and `&serde_json::value::RawValue`. -/// -/// ## Array-like parameters -/// -/// Array-like types can be passed as parameters, including in compile time checked queries, -/// for batch parameter binding due to Exasol's columnar nature. -/// -/// Supported types are [`Vec`], `&T` (slices), [`[T;N]`] (arrays), iterators through the -/// [`ExaIter`](crate::types::ExaIter) adapter, etc. -/// -/// Parameter arrays must be of equal length (runtime checked) or an error will be thrown otherwise. -/// -/// Custom types that implement [`Type`] can be used in array-like types by implementing -/// the [`ExaHasArrayType`](crate::types::ExaHasArrayType) marker trait for them. +/// Prevent re-exporting other drivers if used alongside `sqlx`. 
+mod postgres {} +mod sqlite {} +mod mysql {} + +pub mod any { + pub use sqlx_a_orig::any::*; + pub use sqlx_exasol_impl::any::DRIVER; +} + +#[cfg(any(feature = "derive", feature = "macros"))] +#[doc(hidden)] +pub extern crate sqlx_exasol_macros; + +#[cfg(feature = "macros")] +pub use sqlx_exasol_macros::test; +#[cfg(feature = "derive")] +#[doc(hidden)] +pub use sqlx_exasol_macros::{FromRow, Type}; + +#[cfg(feature = "macros")] +mod macros; + +#[cfg(feature = "macros")] +#[doc(hidden)] +pub mod ty_match; + +#[doc = include_str!("types.md")] pub mod types { - pub use sqlx::types::*; + pub use sqlx_a_orig::types::*; pub use sqlx_exasol_impl::types::*; #[cfg(feature = "chrono")] pub mod chrono { - pub use sqlx::types::chrono::*; + pub use sqlx_a_orig::types::chrono::*; pub use sqlx_exasol_impl::types::chrono::*; } #[cfg(feature = "time")] pub mod time { - pub use sqlx::types::time::*; + pub use sqlx_a_orig::types::time::*; pub use sqlx_exasol_impl::types::time::*; } -} -pub mod any { - pub use sqlx::any::*; - pub use sqlx_exasol_impl::any::DRIVER; + #[cfg(feature = "derive")] + #[doc(hidden)] + pub use sqlx_exasol_macros::Type; } -#[cfg(feature = "macros")] -pub use sqlx_exasol_macros; -#[cfg(feature = "macros")] -mod macros; +pub mod encode { + pub use sqlx_a_orig::encode::*; + #[cfg(feature = "derive")] + #[doc(hidden)] + pub use sqlx_exasol_macros::Encode; +} -#[cfg(feature = "macros")] -#[doc(hidden)] -pub mod ty_match; +pub mod decode { + pub use sqlx_a_orig::decode::*; + #[cfg(feature = "derive")] + #[doc(hidden)] + pub use sqlx_exasol_macros::Decode; +} diff --git a/src/macros.rs b/src/macros.rs index eac467ff..fee698a4 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -131,3 +131,15 @@ macro_rules! query_file_scalar_unchecked ( $crate::sqlx_exasol_macros::expand_query!(scalar = _, source_file = $path, args = [$($args)*], checked = false) ) ); + +#[cfg(feature = "migrate")] +#[macro_export] +macro_rules! 
migrate {
+    ($dir:literal) => {{
+        $crate::sqlx_exasol_macros::migrate!($dir)
+    }};
+
+    () => {{
+        $crate::sqlx_exasol_macros::migrate!()
+    }};
+}
diff --git a/src/types.md b/src/types.md
new file mode 100644
index 00000000..f4b3ee5e
--- /dev/null
+++ b/src/types.md
@@ -0,0 +1,74 @@
+# Supported types
+
+| Rust type | Exasol type |
+| :------------------------ | :-------------------------------------------- |
+| `bool` | `BOOLEAN` |
+| `i8`, `i16`, `i32`, `i64` | `DECIMAL` |
+| `f64` | `DOUBLE` |
+| `String`, `&str` | `CHAR(n) ASCII/UTF8`, `VARCHAR(n) ASCII/UTF8` |
+| `ExaIntervalYearToMonth` | `INTERVAL YEAR TO MONTH` |
+| `HashType` | `HASHTYPE` |
+| `Option<T>` | `T` (for any `T` that implements `Type`) |
+
+## `chrono` feature
+
+| Rust type | Exasol type |
+| :---------------------- | :----------------------- |
+| `chrono::NaiveDate` | `DATE` |
+| `chrono::NaiveDateTime` | `TIMESTAMP` |
+| `chrono::TimeDelta` | `INTERVAL DAY TO SECOND` |
+
+## `time` feature
+
+| Rust type | Exasol type |
+| :------------------------ | :----------------------- |
+| `time::Date` | `DATE` |
+| `time::PrimitiveDateTime` | `TIMESTAMP` |
+| `time::Duration` | `INTERVAL DAY TO SECOND` |
+
+## `rust_decimal` feature
+
+| Rust type | Exasol type |
+| :---------------------- | :------------- |
+| `rust_decimal::Decimal` | `DECIMAL(p,s)` |
+
+## `bigdecimal` feature
+
+| Rust type | Exasol type |
+| :----------------------- | :------------- |
+| `bigdecimal::BigDecimal` | `DECIMAL(p,s)` |
+
+## `uuid` feature
+
+| Rust type | Exasol type |
+| :----------- | :---------- |
+| `uuid::Uuid` | `HASHTYPE` |
+
+## `geo-types` feature
+
+| Rust type | Exasol type |
+| :-------------------- | :---------- |
+| `geo_types::Geometry` | `GEOMETRY` |
+
+**Note:** due to a [bug in the Exasol websocket
+API](https://github.com/exasol/websocket-api/issues/39), `GEOMETRY` can't be used as prepared
+statement bind parameters. 
It can, however, be used as a column in a returned result set or with +runtime checked queries. + +## `json` feature + +The `json` feature enables `Encode` and `Decode` implementations for `Json`, +`serde_json::Value` and `&serde_json::value::RawValue`. + +## Array-like parameters + +Array-like types can be passed as parameters, including in compile time checked queries, +for batch parameter binding due to Exasol's columnar nature. + +Supported types are [`Vec`], `&T` (slices), [`[T;N]`] (arrays), iterators through the +[`ExaIter`](crate::types::ExaIter) adapter, etc. + +Parameter arrays must be of equal length (runtime checked) or an error will be thrown otherwise. + +Custom types that implement [`Type`] can be used in array-like types by implementing +the [`ExaHasArrayType`](crate::types::ExaHasArrayType) marker trait for them. diff --git a/tests/common.rs b/tests/common.rs index 2c8ad4f7..e965442d 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -586,21 +586,21 @@ async fn test_fetch_many_works(mut con: PoolConnection) -> Result<(), Bo #[sqlx_exasol::test] async fn it_works_on_large_datasets(mut con: PoolConnection) -> Result<(), BoxDynError> { - sqlx::query("CREATE TABLE large_dataset (col1 VARCHAR(20), col2 VARCHAR(20));") + sqlx_exasol::query("CREATE TABLE large_dataset (col1 VARCHAR(20), col2 VARCHAR(20));") .execute(&mut *con) .await?; let data = vec!["test"; 100_000]; for _ in 0..50 { - sqlx::query("INSERT INTO large_dataset VALUES(?, ?);") + sqlx_exasol::query("INSERT INTO large_dataset VALUES(?, ?);") .bind(&data) .bind(&data) .execute(&mut *con) .await?; } - let mut rows = sqlx::query("SELECT col1, col2 FROM large_dataset").fetch(&mut *con); + let mut rows = sqlx_exasol::query("SELECT col1, col2 FROM large_dataset").fetch(&mut *con); while let Some(row_result) = rows.try_next().await? 
{
         let row = row_result;
@@ -618,7 +618,7 @@ async fn it_selects_schema(
     let pool = pool_opts.connect_with(exa_opts).await?;
     let mut con = pool.acquire().await?;
 
-    let schema: Option<String> = sqlx::query_scalar("SELECT CURRENT_SCHEMA")
+    let schema: Option<String> = sqlx_exasol::query_scalar("SELECT CURRENT_SCHEMA")
         .fetch_one(&mut *con)
         .await?;
@@ -641,7 +641,7 @@ async fn it_switches_schema(
     ))
     .await?;
 
-    let new_schema: String = sqlx::query_scalar("SELECT CURRENT_SCHEMA")
+    let new_schema: String = sqlx_exasol::query_scalar("SELECT CURRENT_SCHEMA")
         .fetch_one(&mut *con)
         .await?;
@@ -663,7 +663,7 @@ async fn it_switches_schema_from_attr(
     let pool = pool_opts.connect_with(exa_opts).await?;
     let mut con = pool.acquire().await?;
 
-    let orig_schema: String = sqlx::query_scalar("SELECT CURRENT_SCHEMA")
+    let orig_schema: String = sqlx_exasol::query_scalar("SELECT CURRENT_SCHEMA")
         .fetch_one(&mut *con)
         .await?;
@@ -677,7 +677,7 @@ async fn it_switches_schema_from_attr(
     con.attributes_mut().set_current_schema(orig_schema.clone());
     con.flush_attributes().await?;
 
-    let new_schema: String = sqlx::query_scalar("SELECT CURRENT_SCHEMA")
+    let new_schema: String = sqlx_exasol::query_scalar("SELECT CURRENT_SCHEMA")
         .fetch_one(&mut *con)
         .await?;
@@ -694,7 +694,7 @@ async fn it_closes_schema_and_returns_none(
     let pool = pool_opts.connect_with(exa_opts).await?;
     let mut con = pool.acquire().await?;
 
-    let orig_schema: String = sqlx::query_scalar("SELECT CURRENT_SCHEMA")
+    let orig_schema: String = sqlx_exasol::query_scalar("SELECT CURRENT_SCHEMA")
         .fetch_one(&mut *con)
         .await?;
diff --git a/tests/compile_time.rs b/tests/compile_time.rs
index 2b693541..03549ed8 100644
--- a/tests/compile_time.rs
+++ b/tests/compile_time.rs
@@ -1,12 +1,9 @@
 #![allow(deprecated)]
 #![cfg(all(feature = "migrate", feature = "macros"))]
 
-extern crate sqlx_exasol as sqlx;
-
 mod macros;
 
-use sqlx::migrate::Migrator;
-use sqlx_exasol::{error::BoxDynError, types::ExaIntervalYearToMonth};
+use sqlx_exasol::{error::BoxDynError, 
migrate::Migrator, types::ExaIntervalYearToMonth}; #[allow(dead_code)] static MIGRATOR: Migrator = sqlx_exasol::migrate!("tests/migrations_compile_time");