diff --git a/.github/README.md.in b/.github/README.md.in deleted file mode 100644 index b7372e4eb1..0000000000 --- a/.github/README.md.in +++ /dev/null @@ -1,76 +0,0 @@ -# GRDB + SQLCipher - -## What is this? -This is a fork of [GRDB](https://github.com/groue/GRDB.swift) which contains a [SQLCipher Community Edition](https://www.zetetic.net/sqlcipher/open-source/) amalgamation packaged so that it can be consumed as a Swift Package. - -The default branch for this repository is `SQLCipher` so that we can more easily pull upstream changes if we need to. - -## Versioning - -* This Package: *${new_version}* -* GRDB: *${upstream_version}* -* SQLCipher: *${sqlcipher_version}* - -## Contributions -We do not accept contributions to this repository at this time. However, feel free to open an issue in order to start a discussion. - -## We are hiring! -DuckDuckGo is growing fast and we continue to expand our fully distributed team. We embrace diverse perspectives, and seek out passionate, self-motivated people, committed to our shared vision of raising the standard of trust online. If you are a senior software engineer capable in either iOS or Android, visit our [careers](https://duckduckgo.com/hiring/#open) page to find out more about our openings! - -## Updating from Upstream - -Add remote upstream: - -* `git remote add upstream git@github.com:groue/GRDB.swift.git` - -Check out upstream's master branch locally: - -* `git fetch upstream +master:upstream-master && git checkout upstream-master` - -Update upstream's master branch if needed: - -* `git pull upstream master` - -Switch back to the `SQLCipher` branch and merge with upstream-master: - -* `git merge upstream-master` - -Resolve any conflicts that may occur (normally there should be none or only in Package.swift) -and commit the merge. Once done, run `prepare_release.sh` script to fetch and compile the latest tag -of SQLCipher and embed it in GRDB.swift: - -* `./prepare_release.sh` - -The script will also: -* present the summary of updated versions and ask you to pick the new version number for DuckDuckGo GRDB fork, -* test the build, -* create a new release branch and commit changes. - -For versioning, follow [Semantic Versioning Rules](https://semver.org), but note you don't need -to use the same version as GRDB. Examples: - -* Upstream GRDB 5.6.0, after merge -> 5.12.0 - * This project 1.0.0 -> 1.1.0 - -* Upstream GRDB 5.12.0, after merge -> 6.0.0 - * This project 1.1.0 -> 2.0.0 - -If everything looks fine: -* push your branch, -* create PR for BSK referencing the new branch, -* create PRs for iOS and macOS apps referencing your BSK branch. - -Once approved: -* merge your branch back to `SQLCipher`, -* create a tag matching the release number **without the 'v' prefix** (those are reserved for upstream), -* push the tag, -* update the reference to GRDB in BSK to point to a tag. - -### Compiling SQLCipher manually - -In case `prepare_release.sh` script fails, you need to compile SQLCipher amalgamation package -manually. See [general instructions](https://github.com/sqlcipher/sqlcipher#compiling-for-unix-like-systems): - -* Use `./configure --with-crypto-lib=none`. -* Remember to use `make sqlite3.c` and not `make`. -* Copy `sqlite3.c` and `sqlite3.h` to `Sources/SQLCipher/sqlite3.c` and `Sources/SQLCipher/include/sqlite3.h`. 
diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index d0a4e0bac4..6a9f3ec808 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -13,6 +13,7 @@ on: - '.github/workflows/**' - 'Makefile' - 'Package.swift' + - 'SQLiteCustom/src' pull_request: paths: - 'GRDB/**' @@ -20,6 +21,7 @@ on: - '.github/workflows/**' - 'Makefile' - 'Package.swift' + - 'SQLiteCustom/src' concurrency: group: ${{ github.ref_name }} @@ -38,18 +40,34 @@ jobs: fail-fast: false matrix: include: + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + destination: "platform=macOS" + name: "macOS" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + destination: "OS=16.4,name=iPhone 14 Pro" + name: "iOS" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + destination: "OS=16.4,name=Apple TV" + name: "tvOS" + - xcode: "Xcode_14.2.app" + runsOn: macOS-13 + destination: "platform=macOS" + name: "macOS" + - xcode: "Xcode_14.2.app" + runsOn: macOS-13 + destination: "OS=16.2,name=iPhone 14" + name: "iOS" - xcode: "Xcode_14.1.app" - runsOn: macOS-12 + runsOn: macOS-13 destination: "platform=macOS" name: "macOS" - xcode: "Xcode_14.1.app" - runsOn: macOS-12 + runsOn: macOS-13 destination: "OS=16.1,name=iPhone 14" name: "iOS" - - xcode: "Xcode_14.1.app" - runsOn: macOS-12 - destination: "OS=16.1,name=Apple TV" - name: "tvOS" - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 destination: "platform=macOS" @@ -59,7 +77,7 @@ jobs: destination: "OS=16.0,name=iPhone 14" name: "iOS" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} run: set -o pipefail && env NSUnbufferedIO=YES xcodebuild -project GRDB.xcodeproj -scheme GRDB -destination "${{ matrix.destination }}" OTHER_SWIFT_FLAGS='$(inherited) -D SQLITE_ENABLE_FTS5 -D SQLITE_ENABLE_PREUPDATE_HOOK' GCC_PREPROCESSOR_DEFINITIONS='$(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1' clean test SPM: @@ -72,14 +90,20 @@ jobs: fail-fast: false matrix: include: + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + name: "Xcode 14.3.1" + - xcode: "Xcode_14.2.app" + runsOn: macOS-13 + name: "Xcode 14.2" - xcode: "Xcode_14.1.app" - runsOn: macOS-12 + runsOn: macOS-13 name: "Xcode 14.1" - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 name: "Xcode 14.0.1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} run: make test_SPM test_install_SPM SQLCipher3: @@ -92,11 +116,14 @@ jobs: fail-fast: false matrix: include: - - xcode: "Xcode_14.1.app" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + name: "Xcode 14.3.1" + - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 - name: "Xcode 14.1" + name: "Xcode 14.0.1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} run: make test_framework_SQLCipher3Encrypted SQLCipher4: @@ -109,11 +136,14 @@ jobs: fail-fast: false matrix: include: - - xcode: "Xcode_14.1.app" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + name: "Xcode 14.3.1" + - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 - name: "Xcode 14.1" + name: "Xcode 14.0.1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} run: make test_framework_SQLCipher4Encrypted CustomSQLite: @@ -126,11 +156,14 @@ jobs: fail-fast: false matrix: include: - - xcode: "Xcode_14.1.app" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + name: "Xcode 14.3.1" + - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 - name: "Xcode 14.1" + name: "Xcode 14.0.1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} run: make 
test_framework_GRDBCustomSQLiteOSX XCFramework: @@ -143,11 +176,14 @@ jobs: fail-fast: false matrix: include: - - xcode: "Xcode_14.1.app" + - xcode: "Xcode_14.3.1.app" + runsOn: macOS-13 + name: "Xcode 14.3.1" + - xcode: "Xcode_14.0.1.app" runsOn: macOS-12 - name: "Xcode 14.1" + name: "Xcode 14.0.1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: ${{ matrix.name }} - run: make test_archive_GRDBOSX_xcframework + run: make test_universal_xcframework \ No newline at end of file diff --git a/.gitignore b/.gitignore index cd1383aee8..7c7e11035d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -## https://github.com/github/gitignore/blob/master/Global/OSX.gitignore +## https://github.com/github/gitignore/blob/master/Global/macOS.gitignore .DS_Store .AppleDouble diff --git a/BRAG.md b/BRAG.md index e56e8f4d6c..284b8b0839 100644 --- a/BRAG.md +++ b/BRAG.md @@ -4,6 +4,46 @@ Here is a list of people who were happy to meet GRDB :-) --- +https://twitter.com/DamienPetrilli/status/1623922310976786433 + +> I can't overstate how much more productive I am since I ditched the obsolete Core Data framework and started to use GRDB instead. +> +> The hard stuff is doable, and the simple stuff is even simpler, as it should be. + +--- + +https://twitter.com/fullqueuedev/status/1603350969357684736 + +> Yes, I much prefer GRDB. I’ve tested many SQLite wrappers over the years 💜 + +--- + +https://twitter.com/cassiuspacheco/status/1574948903857770497 + +> SQLite using @groue’s GRDB, no brainer! Awesome documentation, support and beautifully designed. + +--- + +https://twitter.com/simrandotdev/status/1552004471122587649 + +> We get Core Data from Apple which is the worst library to deal with. +> Then there is Realm which is nice but again you do not get the RAW SQL power as it is its own type of Db. +> Then comes the most beautiful library every written which is very close to Room called GRDB. + +--- + +https://twitter.com/bellebcooper/status/1534070340850094080 + +> When I first started using GRDB I didn't know SQL or much at all about databases 😬 So I've always really liked the query API. But these days I really appreciate that you support raw SQL too, because it gives me more control and freedom when I need it. 👍👍👍 + +--- + +https://twitter.com/mikeash/status/1504810545429499909 + +> Another cool library using SQL interpolation! + +--- + https://twitter.com/swiftkarim/status/1354753451377483781 > I cannot recommend GRDB enough. It works well with Swift and eliminates whole classes of potential programming errors that can be made with Core Data. Also being able to use raw SQL queries if needed can be extremely useful! 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 46603ad425..2aa617fe66 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,22 +7,43 @@ GRDB adheres to [Semantic Versioning](https://semver.org/), with one exception: #### 6.x Releases -- `6.6.x` Releases - [6.6.0](#660) +- `6.27.x` Releases - [6.27.0](#6270) +- `6.26.x` Releases - [6.26.0](#6260) +- `6.25.x` Releases - [6.25.0](#6250) +- `6.24.x` Releases - [6.24.0](#6240) - [6.24.1](#6241) - [6.24.2](#6242) +- `6.23.x` Releases - [6.23.0](#6230) +- `6.22.x` Releases - [6.22.0](#6220) +- `6.21.x` Releases - [6.21.0](#6210) +- `6.20.x` Releases - [6.20.0](#6200) - [6.20.1](#6201) - [6.20.2](#6202) +- `6.19.x` Releases - [6.19.0](#6190) +- `6.18.x` Releases - [6.18.0](#6180) +- `6.17.x` Releases - [6.17.0](#6170) +- `6.16.x` Releases - [6.16.0](#6160) +- `6.15.x` Releases - [6.15.0](#6150) - [6.15.1](#6151) +- `6.14.x` Releases - [6.14.0](#6140) +- `6.13.x` Releases - [6.13.0](#6130) +- `6.12.x` Releases - [6.12.0](#6120) +- `6.11.x` Releases - [6.11.0](#6110) +- `6.10.x` Releases - [6.10.0](#6100) - [6.10.1](#6101) - [6.10.2](#6102) +- `6.9.x` Releases - [6.9.0](#690) - [6.9.1](#691) - [6.9.2](#692) +- `6.8.x` Releases - [6.8.0](#680) +- `6.7.x` Releases - [6.7.0](#670) +- `6.6.x` Releases - [6.6.0](#660) - [6.6.1](#661) - `6.5.x` Releases - [6.5.0](#650) - `6.4.x` Releases - [6.4.0](#640) - `6.3.x` Releases - [6.3.0](#630) - [6.3.1](#631) - `6.2.x` Releases - [6.2.0](#620) - `6.1.x` Releases - [6.1.0](#610) - `6.0.x` Releases - [6.0.0](#600) -- `6.0.0` Betas - [6.0.0-beta](#600-beta) | [6.0.0-beta.2](#600-beta2) | [6.0.0-beta.3](#600-beta3) | [6.0.0-beta.4](#600-beta4) +- `6.0.0` Betas - [6.0.0-beta](#600-beta) - [6.0.0-beta.2](#600-beta2) - [6.0.0-beta.3](#600-beta3) - [6.0.0-beta.4](#600-beta4) #### 5.x Releases - `5.26.x` Releases - [5.26.0](#5260) - [5.26.1](#5261) - `5.25.x` Releases - [5.25.0](#5250) -- `5.24.x` Releases - [5.24.0](#5240) | [5.24.1](#5241) +- `5.24.x` Releases - [5.24.0](#5240) - [5.24.1](#5241) - `5.23.x` Releases - [5.23.0](#5230) -- `5.22.x` Releases - [5.22.0](#5220) | [5.22.1](#5221) | [5.22.2](#5222) +- `5.22.x` Releases - [5.22.0](#5220) - [5.22.1](#5221) - [5.22.2](#5222) - `5.21.x` Releases - [5.21.0](#5210) - `5.20.x` Releases - [5.20.0](#5200) - `5.19.x` Releases - [5.19.0](#5190) @@ -37,41 +58,41 @@ GRDB adheres to [Semantic Versioning](https://semver.org/), with one exception: - `5.10.x` Releases - [5.10.0](#5100) - `5.9.x` Releases - [5.9.0](#590) - `5.8.x` Releases - [5.8.0](#580) -- `5.7.x` Releases - [5.7.0](#570) | [5.7.1](#571) | [5.7.2](#572) | [5.7.3](#573) | [5.7.4](#574) +- `5.7.x` Releases - [5.7.0](#570) - [5.7.1](#571) - [5.7.2](#572) - [5.7.3](#573) - [5.7.4](#574) - `5.6.x` Releases - [5.6.0](#560) - `5.5.x` Releases - [5.5.0](#550) - `5.4.x` Releases - [5.4.0](#540) - `5.3.x` Releases - [5.3.0](#530) - `5.2.x` Releases - [5.2.0](#520) - `5.1.x` Releases - [5.1.0](#510) -- `5.0.x` Releases - [5.0.0](#500) | [5.0.1](#501) | [5.0.2](#502) | [5.0.3](#503) -- `5.0.0` Betas - [5.0.0-beta](#500-beta) | [5.0.0-beta.2](#500-beta2) | [5.0.0-beta.3](#500-beta3) | [5.0.0-beta.4](#500-beta4) | [5.0.0-beta.5](#500-beta5) | [5.0.0-beta.6](#500-beta6) | [5.0.0-beta.7](#500-beta7) | [5.0.0-beta.8](#500-beta8) | [5.0.0-beta.9](#500-beta9) | [5.0.0-beta.10](#500-beta10) | [5.0.0-beta.11](#500-beta11) +- `5.0.x` Releases - [5.0.0](#500) - [5.0.1](#501) - [5.0.2](#502) - [5.0.3](#503) +- `5.0.0` Betas - [5.0.0-beta](#500-beta) - [5.0.0-beta.2](#500-beta2) - [5.0.0-beta.3](#500-beta3) - [5.0.0-beta.4](#500-beta4) 
- [5.0.0-beta.5](#500-beta5) - [5.0.0-beta.6](#500-beta6) - [5.0.0-beta.7](#500-beta7) - [5.0.0-beta.8](#500-beta8) - [5.0.0-beta.9](#500-beta9) - [5.0.0-beta.10](#500-beta10) - [5.0.0-beta.11](#500-beta11) #### 4.x Releases - `4.14.x` Releases - [4.14.0](#4140) - `4.13.x` Releases - [4.13.0](#4130) -- `4.12.x` Releases - [4.12.0](#4120) | [4.12.1](#4121) | [4.12.2](#4122) +- `4.12.x` Releases - [4.12.0](#4120) - [4.12.1](#4121) - [4.12.2](#4122) - `4.11.x` Releases - [4.11.0](#4110) - `4.10.x` Releases - [4.10.0](#4100) - `4.9.x` Releases - [4.9.0](#490) -- `4.8.x` Releases - [4.8.0](#480) | [4.8.1](#481) +- `4.8.x` Releases - [4.8.0](#480) - [4.8.1](#481) - `4.7.x` Releases - [4.7.0](#470) -- `4.6.x` Releases - [4.6.0](#460) | [4.6.1](#461) | [4.6.2](#462) +- `4.6.x` Releases - [4.6.0](#460) - [4.6.1](#461) - [4.6.2](#462) - `4.5.x` Releases - [4.5.0](#450) - `4.4.x` Releases - [4.4.0](#440) - `4.3.x` Releases - [4.3.0](#430) -- `4.2.x` Releases - [4.2.0](#420) | [4.2.1](#421) -- `4.1.x` Releases - [4.1.0](#410) | [4.1.1](#411) -- `4.0.x` Releases - [4.0.0](#400) | [4.0.1](#401) +- `4.2.x` Releases - [4.2.0](#420) - [4.2.1](#421) +- `4.1.x` Releases - [4.1.0](#410) - [4.1.1](#411) +- `4.0.x` Releases - [4.0.0](#400) - [4.0.1](#401) #### 3.x Releases - `3.7.x` Releases - [3.7.0](#370) -- `3.6.x` Releases - [3.6.0](#360) | [3.6.1](#361) | [3.6.2](#362) +- `3.6.x` Releases - [3.6.0](#360) - [3.6.1](#361) - [3.6.2](#362) - `3.5.x` Releases - [3.5.0](#350) - `3.4.x` Releases - [3.4.0](#340) -- `3.3.x` Releases - [3.3.0](#330) | [3.3.1](#331) +- `3.3.x` Releases - [3.3.0](#330) - [3.3.1](#331) - `3.3.0` Betas - [3.3.0-beta1](#330-beta1) - `3.2.x` Releases - [3.2.0](#320) - `3.1.x` Releases - [3.1.0](#310) @@ -83,18 +104,18 @@ GRDB adheres to [Semantic Versioning](https://semver.org/), with one exception: - `2.9.x` Releases - [2.9.0](#290) - `2.8.x` Releases - [2.8.0](#280) - `2.7.x` Releases - [2.7.0](#270) -- `2.6.x` Releases - [2.6.0](#260) | [2.6.1](#261) +- `2.6.x` Releases - [2.6.0](#260) - [2.6.1](#261) - `2.5.x` Releases - [2.5.0](#250) -- `2.4.x` Releases - [2.4.0](#240) | [2.4.1](#241) | [2.4.2](#242) -- `2.3.x` Releases - [2.3.0](#230) | [2.3.1](#231) +- `2.4.x` Releases - [2.4.0](#240) - [2.4.1](#241) - [2.4.2](#242) +- `2.3.x` Releases - [2.3.0](#230) - [2.3.1](#231) - `2.2.x` Releases - [2.2.0](#220) - `2.1.x` Releases - [2.1.0](#210) -- `2.0.x` Releases - [2.0.0](#200) | [2.0.1](#201) | [2.0.2](#202) | [2.0.3](#203) +- `2.0.x` Releases - [2.0.0](#200) - [2.0.1](#201) - [2.0.2](#202) - [2.0.3](#203) #### 1.x Releases - `1.3.x` Releases - [1.3.0](#130) -- `1.2.x` Releases - [1.2.0](#120) | [1.2.1](#121) | [1.2.2](#122) +- `1.2.x` Releases - [1.2.0](#120) - [1.2.1](#121) - [1.2.2](#122) - `1.1.x` Releases - [1.1.0](#110) - `1.0.x` Releases - [1.0.0](#100) @@ -104,6 +125,251 @@ GRDB adheres to [Semantic Versioning](https://semver.org/), with one exception: --- +## 6.27.0 + +Released April 21, 2024 + +- **Fixed**: [#1533](https://github.com/groue/GRDB.swift/pull/1533) by [@groue](https://github.com/groue): Fix a bug in Decodable support +- **Documentation Update**: [#1534](https://github.com/groue/GRDB.swift/pull/1534) The [Single-Row Tables](Documentation/SingleRowTables.md) guide was updated with guidance about default configuration values. 
+- **Documentation Update**: [#1535](https://github.com/groue/GRDB.swift/pull/1535) The [ValueObservation Scheduling](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation#ValueObservation-Scheduling) documentation chapter explains the default behavior of `ValueObservation` fetches, and explains how to make sure they are never performed on the main thread. + +## 6.26.0 + +Released March 23, 2024 + +- **New**: [#1503](https://github.com/groue/GRDB.swift/pull/1503) by [@simba909](https://github.com/simba909): Conform Database.ColumnType to Sendable +- **New**: [#1510](https://github.com/groue/GRDB.swift/pull/1510) by [@groue](https://github.com/groue): Add Sendable conformances and unavailabilities +- **New**: [#1511](https://github.com/groue/GRDB.swift/pull/1511) by [@groue](https://github.com/groue): Database schema dump +- **New**: [#1515](https://github.com/groue/GRDB.swift/pull/1515) by [@groue](https://github.com/groue): Support for the CAST SQLite function +- **Fixed**: [#1508](https://github.com/groue/GRDB.swift/pull/1508) by [@groue](https://github.com/groue): Fix ValueObservation mishandling of database schema modification +- **Fixed**: [#1512](https://github.com/groue/GRDB.swift/issues/1512): Decoding errors are now correctly reported when decoding NULL into a non-optional property of type `Data` or `Date`. + +## 6.25.0 + +Released February 25, 2024 + +- **New**: [#1496](https://github.com/groue/GRDB.swift/pull/1496) by [@danielepantaleone](https://github.com/danielepantaleone): Add privacy manifest file. + + A `PrivacyInfo.xcprivacy` resource was added to the GRDB SPM package and Xcode projects. It declares that GRDB does not collect anything. + +## 6.24.2 + +Released January 21, 2024 + +- **Documentation Update**: [#1485](https://github.com/groue/GRDB.swift/pull/1485) The [Sharing a Database](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasesharing) guide was updated with a new recommendation for databases shared between multiple processes. Writers should always perform IMMEDIATE transactions in order to avoid the `SQLITE_BUSY` error that can occur when transactions overlap. The new recommendation fits in a single line of code: `configuration.defaultTransactionKind = .immediate`. +- **New**: Associations that involve views instead of tables were already supported, with an explicit `ForeignKey` in their definition. When the foreign key is missing, a clear diagnostic message is now emitted, instead of an unhelpful "no such table" runtime error. + +## 6.24.1 + +Released January 6, 2024 + +- **New**: [#1477](https://github.com/groue/GRDB.swift/pull/1477): Remove shadow tables from database dump + +## 6.24.0 + +Released January 6, 2024 + +- **New**: [#1466](https://github.com/groue/GRDB.swift/pull/1466) by [@barnettben](https://github.com/barnettben): Add schema name option to database introspection methods + +## 6.23.0 + +Released December 1, 2023 + +- **New**: [#1462](https://github.com/groue/GRDB.swift/pull/1462) Temporary read-only access + +## 6.22.0 + +Released November 26, 2023 + +- **New**: [#1452](https://github.com/groue/GRDB.swift/pull/1452) by [@groue](https://github.com/groue): SQLite 3.44.0, FILTER and ORDER BY clauses in aggregate functions +- **New**: [#1460](https://github.com/groue/GRDB.swift/pull/1460) by [@groue](https://github.com/groue): Explicit change notifications help applications deal with undetected database changes. 
+- **Documentation Update**: The documentation of [`ValueObservation`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation), [`DatabaseRegionObservation`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseregionobservation), and [`TransactionObserver`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactionobserver) has a new "Dealing with Undetected Changes" chapter that documents possible strategies for notifying applications of undetected database changes.
+
+## 6.21.0
+
+Released October 29, 2023
+
+- **New**: [#1448](https://github.com/groue/GRDB.swift/pull/1448) by [@groue](https://github.com/groue): Add support for stable ordering and dump of views
+- **New**: [#1449](https://github.com/groue/GRDB.swift/pull/1449) by [@groue](https://github.com/groue): Backport temporary copies from GRDBSnapshotTesting
+
+## 6.20.2
+
+Released October 15, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.20.1...v6.20.2)
+
+- **Fixed**: [#1442](https://github.com/groue/GRDB.swift/pull/1442) by [@groue](https://github.com/groue): Extend the macOS availability of JSON functions
+
+## 6.20.1
+
+Released October 13, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.20.0...v6.20.1)
+
+- **Fixed**: Removed a debug print
+
+## 6.20.0
+
+Released October 13, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.19.0...v6.20.0)
+
+- **New**: [#1439](https://github.com/groue/GRDB.swift/pull/1439) by [@groue](https://github.com/groue): Dump requests
+- **New**: `QueryInterfaceRequest.withStableOrder()` returns a request with well-defined order, suitable for tests.
+
+## 6.19.0
+
+Released October 4, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.18.0...v6.19.0)
+
+- **New**: [#1429](https://github.com/groue/GRDB.swift/pull/1429) by [@JhonnyBillM](https://github.com/JhonnyBillM): Allow `DatabaseValueConvertible` types to customize their database JSON format
+- **New**: [#1436](https://github.com/groue/GRDB.swift/pull/1436) by [@myyra](https://github.com/myyra) and [@groue](https://github.com/groue): JSON functions
+- **New**: `Database` has learned to create indexes on expressions, and to specify collations on indexed columns, with the `create(index:on:expressions:options:condition:)` method.
+- **New**: Codable records can specify coding strategies for their `Data` properties. See [#1436](https://github.com/groue/GRDB.swift/pull/1436) for more information.
+- **Documentation Update**: A new [JSON Support](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/json) article provides an overview of JSON handling.
+- **Documentation Update**: The [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) now recommend that record types with an auto-incremented id avoid conforming to the `Identifiable` protocol.
+
+## 6.18.0
+
+Released September 1, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.17.0...v6.18.0)
+
+- **New**: Expose the database path from `DatabaseReader`.
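To illustrate the 6.18.0 entry above: with `path` exposed on the `DatabaseReader` protocol, generic code can report where any database is stored. A minimal sketch (the logging function itself is hypothetical, not part of GRDB):

```swift
import GRDB

// Works with DatabaseQueue and DatabasePool alike, since both
// conform to DatabaseReader.
func logDatabaseLocation(_ reader: some DatabaseReader) {
    print("Database stored at \(reader.path)")
}
```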
+ +## 6.17.0 + +Released August 26, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.16.0...v6.17.0) + +- **New**: [#1421](https://github.com/groue/GRDB.swift/pull/1421) by [@groue](https://github.com/groue): Make it possible to open a DatabaseQueue in the WAL mode + +## 6.16.0 + +Released July 9, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.15.1...v6.16.0) + +- **New**: [#1397](https://github.com/groue/GRDB.swift/pull/1397) by [@groue](https://github.com/groue): Swift API for creating database views +- **New**: [#1401](https://github.com/groue/GRDB.swift/pull/1401) by [@kustra](https://github.com/kustra): Linux compilation fixes +- **New**: [#1402](https://github.com/groue/GRDB.swift/pull/1402) by [@groue](https://github.com/groue): Upgrade custom SQLite builds to 3.42.0 +- **New**: [#1403](https://github.com/groue/GRDB.swift/pull/1403) by [@groue](https://github.com/groue): GitHub CI: test Xcode 14.3.1, macOS 13 +- **New**: :star: [#1405](https://github.com/groue/GRDB.swift/pull/1405) Simplify the declaration of BelongsTo associations in the database schema +- **Documentation Update**: The documentation was updated for the new recommended way to declare associations in the database schema, with the `belongsTo()` method introduced by [#1405](https://github.com/groue/GRDB.swift/pull/1405): + - [`belongsTo(_:inTable:onDelete:onUpdate:deferred:indexed:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tabledefinition/belongsto(_:intable:ondelete:onupdate:deferred:indexed:)) + - [The Database Schema](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema) + - [Migrations](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/migrations) + - [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) + - [Associations](Documentation/AssociationsBasics.md) + - [`BelongsToAssociation`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/belongstoassociation) + - [`HasManyAssociation`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/hasmanyassociation) + - [`HasOneAssociation`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/hasoneassociation) + + +## 6.15.1 + +Released June 17, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.15.0...v6.15.1) + +- **Fixed**: Fixed Xcode 15 warnings +- **Fixed**: [#1391](https://github.com/groue/GRDB.swift/pull/1391) Fix upsert in FTS5-synchronized tables + + +## 6.15.0 + +Released June 2, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.14.0...v6.15.0) + +- **New**: [#1382](https://github.com/groue/GRDB.swift/pull/1382) Add `DerivableRequest.all()` +- **New**: [#1384](https://github.com/groue/GRDB.swift/pull/1384) Allow ValueObservation to start from a truncated wal file +- **Documentation Update**: The [ValueObservation Performance](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation#ValueObservation-Performance) documentation chapter explains how truncating WAL checkpoints impact `ValueObservation`. 
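The `ValueObservation` entries above (6.15.0 and the linked performance chapter) are easier to follow with a concrete observation in hand. A minimal sketch, assuming a `Player` record type and an existing `dbQueue`, neither of which is defined here:

```swift
import GRDB

// Track all players; GRDB re-fetches and notifies whenever they change.
let observation = ValueObservation.tracking { db in
    try Player.fetchAll(db)
}

// The observation runs until the returned cancellable is cancelled
// or deallocated.
let cancellable = observation.start(
    in: dbQueue, // a DatabaseQueue or DatabasePool
    onError: { error in print("Players could not be observed: \(error)") },
    onChange: { players in print("Fresh players: \(players)") })
```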
+
+## 6.14.0
+
+Released May 25, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.13.0...v6.14.0)
+
+- **New**: [#1376](https://github.com/groue/GRDB.swift/pull/1376) Bitwise operations
+- **New**: [#1379](https://github.com/groue/GRDB.swift/pull/1379) Create indexes with a default name
+
+
+## 6.13.0
+
+Released May 15, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.12.0...v6.13.0)
+
+- **New**: [#1373](https://github.com/groue/GRDB.swift/pull/1373) Deprecate string literal arguments for `TableDefinition.check()`
+
+
+## 6.12.0
+
+Released April 29, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.11.0...v6.12.0)
+
+- **Fixed**: [#1364](https://github.com/groue/GRDB.swift/pull/1364) Fix a regression introduced with version 6.10.0, where starting multiple observations while performing writes could create a deadlock.
+- **New**: `FetchableRecordDecoder` is an object that decodes `Decodable` fetchable records from database rows. You may want to use this decoder when you implement a custom `init(row: Row)` initializer and need to invoke the `Decodable` implementation from it.
+
+## 6.11.0
+
+Released April 17, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.10.2...v6.11.0)
+
+- **New**: [#1361](https://github.com/groue/GRDB.swift/pull/1361) `DatabaseMigrator.eraseDatabaseOnSchemaChange` ignores internal schema objects
+
+## 6.10.2
+
+Released April 10, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.10.1...v6.10.2)
+
+- **Documentation Update**: The [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) guide has moved to DocC.
+- **Documentation Update**: The [Demo Applications](Documentation/DemoApps/) demonstrate how their `AppDatabase` "database manager" can provide a base database configuration.
+
+## 6.10.1
+
+Released April 1, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.10.0...v6.10.1)
+
+- **Fixed**: [#1357](https://github.com/groue/GRDB.swift/pull/1357) `QueryInterfaceRequest.fetchCount` no longer executes invalid SQL queries for some requests.
+
+## 6.10.0
+
+Released March 20, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.9.2...v6.10.0)
+
+- **New**: [#1350](https://github.com/groue/GRDB.swift/pull/1350) by [@groue](https://github.com/groue): DatabasePool keeps read-only connections open when requested to, and ValueObservation no longer opens a new database connection when it starts.
+
+## 6.9.2
+
+Released March 14, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.9.1...v6.9.2)
+
+- **Fixed**: [#1349](https://github.com/groue/GRDB.swift/pull/1349) by [@JonLz](https://github.com/JonLz): Fix Migrations.md example so it compiles
+- **Fixed**: Avoid the risk of pathologically deep stack traces, or stack overflow, introduced by [#1348](https://github.com/groue/GRDB.swift/pull/1348).
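Here is a sketch of the `FetchableRecordDecoder` described in the 6.12.0 entry above. The `decode(_:from:)` call is inferred from that entry rather than copied from documentation, and the `Player` type is hypothetical:

```swift
import GRDB

struct Player: Codable, FetchableRecord {
    var id: Int64
    var name: String

    // A custom row initializer that still reuses the Decodable
    // machinery instead of reading each column by hand.
    init(row: Row) throws {
        self = try FetchableRecordDecoder().decode(Player.self, from: row)
        // Any extra post-decoding setup would go here.
    }
}
```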
+
+## 6.9.1
+
+Released March 12, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.9.0...v6.9.1)
+
+- **Documentation Updates**: Moved more README chapters into DocC:
+    - [`RowAdapter`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter)
+    - [`splittingRowAdapters(columnCounts:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/splittingrowadapters(columncounts:))
+
+## 6.9.0
+
+Released March 12, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.8.0...v6.9.0)
+
+- **New**: [#1348](https://github.com/groue/GRDB.swift/pull/1348) by [@groue](https://github.com/groue): SQLite argument binding optimization
+- **New**: `FTS5.api(db)` returns a pointer to the `fts5_api` structure, useful for low-level [FTS5 customization](https://www.sqlite.org/fts5.html#extending_fts5).
+- **Documentation Updates**: Moved more README chapters into DocC, enhanced and extended DocC articles:
+    - [Database Connections](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections)
+    - [Prepared Statements](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement)
+    - [DatabaseValueConvertible](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible)
+
+## 6.8.0
+
+Released February 24, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.7.0...v6.8.0)
+
+- **New**: [#1338](https://github.com/groue/GRDB.swift/pull/1338) by [@groue](https://github.com/groue): TimestampedRecord sample code
+- **New**: `EncodableRecord.databaseChanges(modify:)` modifies a record and returns a dictionary of applied changes.
+- **Documentation Update**: The [Record Timestamps and Transaction Date](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordtimestamps) article provides a sample `TimestampedRecord` protocol that applications may adapt for their own use.
+
+## 6.7.0
+
+Released February 19, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.6.1...v6.7.0)
+
+- **Fixed**: [#1316](https://github.com/groue/GRDB.swift/pull/1316) and [#1320](https://github.com/groue/GRDB.swift/pull/1320) by [@baekteun](https://github.com/baekteun): Replace "OSX" with "macOS".
+- **Fixed**: [#1328](https://github.com/groue/GRDB.swift/pull/1328) by [@ytti](https://github.com/ytti): Fix documentation about Data passphrases.
+- **Fixed**: [#1327](https://github.com/groue/GRDB.swift/pull/1327) by [@groue](https://github.com/groue): Remove dependency on any specific SwiftLint version.
+- **New**: [#1331](https://github.com/groue/GRDB.swift/pull/1331) by [@groue](https://github.com/groue): Transaction Date
+- **New**: [#1336](https://github.com/groue/GRDB.swift/pull/1336) by [@groue](https://github.com/groue): Enable FTS5 by default
+- **Documentation Update**: The new [Record Timestamps and Transaction Date](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordtimestamps) article explains how applications can save the creation and modification date of records.
+- Added support for `Table` in SQL interpolation.
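The last 6.7.0 bullet is terse, so here is a minimal sketch of `Table` in SQL interpolation. The `player` table and the `dbQueue` connection are assumptions of the example:

```swift
import GRDB

let playerTable = Table("player")

// The Table value is interpolated as a table name in the SQL literal.
let request: SQLRequest<Row> = "SELECT * FROM \(playerTable)"

let rows = try dbQueue.read { db in
    try request.fetchAll(db)
}
```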
+ +## 6.6.1 + +Released January 15, 2023 • [diff](https://github.com/groue/GRDB.swift/compare/v6.6.0...v6.6.1) + +- **Fixed**: [#1317](https://github.com/groue/GRDB.swift/pull/1317) by [@groue](https://github.com/groue): Fix handling of multiple deep prefetched associations + ## 6.6.0 Released December 29, 2022 • [diff](https://github.com/groue/GRDB.swift/compare/v6.5.0...v6.6.0) @@ -3654,7 +3920,7 @@ It comes with breaking changes, but the good news is that they are the last (unt As a matter of fact, GRDB 1.0 still supports Xcode 8.1 and Swift 3.0. But future versions are free to use Swift 3.1 features, and will require Xcode 8.3+. - The targetted operating systems are unchanged: iOS 8.0+ / OSX 10.9+ / watchOS 2.0+ + The targetted operating systems are unchanged: iOS 8.0+ / macOS 10.9+ / watchOS 2.0+ - **[Record types](https://github.com/groue/GRDB.swift#records) have their `persistentDictionary` property replaced with the `encode(to:)` method:** @@ -5462,11 +5728,11 @@ Released May 17, 2016 **Fixed** -- Restored support for iOS before 8.2 and OS X before 10.10. Fixes [#51](https://github.com/groue/GRDB.swift/issues/51). +- Restored support for iOS before 8.2 and macOS before 10.10. Fixes [#51](https://github.com/groue/GRDB.swift/issues/51). **Breaking Changes** -- Support for advanced migrations is not available until iOS 8.2 and OS X 10.10: +- Support for advanced migrations is not available until iOS 8.2 and macOS 10.10: ```diff struct DatabaseMigrator { @@ -5522,7 +5788,7 @@ Released May 10, 2016 **New** -- `FetchedRecordsController` is now exposed in OSX CocoaPods framework ([documentation](https://github.com/groue/GRDB.swift#fetchedrecordscontroller)) +- `FetchedRecordsController` is now exposed in macOS CocoaPods framework ([documentation](https://github.com/groue/GRDB.swift#fetchedrecordscontroller)) **Fixed** @@ -5655,7 +5921,7 @@ Released April 5, 2016 **Fixed** -- Restored CocoaPods support for iOS 8+ and OS X 10.9+ +- Restored CocoaPods support for iOS 8+ and macOS 10.9+ ## 0.56.0 @@ -5698,7 +5964,7 @@ Released March 31, 2016 Released March 29, 2016 -This release restores CocoaPods support for iOS 9.0+ and OSX 10.11+. We'll try to bring back CocoaPods support for iOS 8.0+ or OSX 10.9+ in a further release. +This release restores CocoaPods support for iOS 9.0+ and macOS 10.11+. We'll try to bring back CocoaPods support for iOS 8.0+ or macOS 10.9+ in a further release. ## 0.54.0 @@ -6597,7 +6863,7 @@ Released August 25, 2015 **Fixed** -- Reduced iOS Deployment Target to 8.0, and OSX Deployment Target to 10.9. +- Reduced iOS Deployment Target to 8.0, and macOS Deployment Target to 10.9. - `DatabaseQueue.inTransaction()` is now declared as `rethrows`. **Breaking changes** diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 79f129cfec..eab1679cbe 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -61,7 +61,7 @@ How you can Contribute - Get familiar with the [Swift API Design Guidelines](https://www.swift.org/documentation/api-design-guidelines/). - Spaces, not tabs. - Whitespace-only lines are not trimmed. - - Documentation comments are hard-wrapped at column 80 (Xcode > Preferences > Text Editing > Display > [X] Page guide at column: 80). + - Documentation comments are hard-wrapped at column 76 (Xcode > Preferences > Text Editing > Display > [X] Page guide at column: 76). - No Swiftlint warning after a build. 
@@ -69,7 +69,7 @@ How you can Contribute GRDB documentation is provided as a DocC reference, and guides ([README.md](README.md) and the [Documentation](Documentation) folder). - Please keep the reference and the guides up-to-date. Use Xcode > Product > Build Documentation in order to control the quality of your reference documentation. + Please keep the reference and the guides up-to-date. To control the quality of your DocC reference documentation, close the workspace, open `Package.swift` in Xcode, and use Product > Build Documentation. GRDB is "documentation-driven", which means that nothing ships until it is supported by documentation that makes sense. Documentation makes sense when someone who is not you is able to figure out what is the purpose of your contribution, how to use it, and what are its eventual caveats and corner cases. When the documentation is hard to write, or reveals too many caveats, it is the sign that the api needs to be fixed. diff --git a/Documentation/AssociationsBasics.md b/Documentation/AssociationsBasics.md index 7f8158d44e..50e241674f 100644 --- a/Documentation/AssociationsBasics.md +++ b/Documentation/AssociationsBasics.md @@ -130,7 +130,7 @@ let request = Book.including(optional: Book.author) let bookInfos = BookInfo.fetchAll(db, request) ``` -Before we dive in, please remember that associations can not generate all possible SQL queries that involve several tables. You may also *prefer* writing SQL, and this is just OK, because your SQL skills are welcome: see the [Joined Queries Support](../README.md#joined-queries-support) chapter. +Before we dive in, please remember that associations can not generate all possible SQL queries that involve several tables. You may also *prefer* writing SQL, and this is just OK, because your SQL skills are welcome. The [`splittingRowAdapters(columnCounts:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/splittingrowadapters(columncounts:)) method can help you consume the rows fetched from joined queries, as in `SELECT book.*, author.* FROM ...`. ## Required Protocols @@ -429,15 +429,13 @@ The matching [migration] would look like: migrator.registerMigration("Employees") { db in try db.create(table: "employee") { t in t.autoIncrementedPrimaryKey("id") - t.column("managerId", .integer) - .indexed() - .references("employee", onDelete: .restrict) + t.belongsTo("manager", inTable: "employee", onDelete: .setNull) t.column("name", .text) } } ``` -Note that both sides of the self-join use a customized **[association key](#the-structure-of-a-joined-request)**. This helps consuming this association. For example: +Note that the associations on both sides of the self-join use a customized **[association key](#the-structure-of-a-joined-request)**. This helps consuming this association. For example: ```swift struct EmployeeInfo: FetchableRecord, Decodable { @@ -536,20 +534,16 @@ migrator.registerMigration("Books and Authors") { db in } try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") - t.column("authorId", .integer) // (2) + t.belongsTo("author", onDelete: .cascade) // (2) .notNull() // (3) - .indexed() // (4) - .references("author", onDelete: .cascade) // (5) t.column("title", .text) } } ``` 1. The `author` table has a primary key. -2. The `book.authorId` column is used to link a book to the author it belongs to. +2. The `book.authorId` column is used to link a book to the author it belongs to. This column is indexed in order to ease the selection of an author's books. 
A foreign key is defined from `book.authorId` column to `author.id`, so that SQLite guarantees that no book refers to a missing author. The `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions] for more information. 3. Make the `book.authorId` column not null if you want SQLite to guarantee that all books have an author. -4. Create an index on the `book.authorId` column in order to ease the selection of an author's books. -5. Create a foreign key from `book.authorId` column to `authors.id`, so that SQLite guarantees that no book refers to a missing author. The `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions] for more information. The example above uses auto-incremented primary keys. But generally speaking, all primary keys are supported, including composite primary keys that span several columns. @@ -595,20 +589,16 @@ migrator.registerMigration("Books and Authors") { db in } try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") - t.column("authorId", .integer) // (2) + t.belongsTo("author", onDelete: .cascade) // (2) .notNull() // (3) - .indexed() // (4) - .references("author", onDelete: .cascade) // (5) t.column("title", .text) } } ``` 1. The `author` table has a primary key. -2. The `book.authorId` column is used to link a book to the author it belongs to. +2. The `book.authorId` column is used to link a book to the author it belongs to. This column is indexed in order to ease the selection of an author's books. A foreign key is defined from `book.authorId` column to `author.id`, so that SQLite guarantees that no book refers to a missing author. The `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions] for more information. 3. Make the `book.authorId` column not null if you want SQLite to guarantee that all books have an author. -4. Create an index on the `book.authorId` column in order to ease the selection of an author's books. -5. Create a foreign key from `book.authorId` column to `authors.id`, so that SQLite guarantees that no book refers to a missing author. The `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions] for more information. The example above uses auto-incremented primary keys. But generally speaking, all primary keys are supported, including composite primary keys that span several columns. @@ -654,10 +644,9 @@ migrator.registerMigration("Countries") { db in } try db.create(table: "demographics") { t in t.autoIncrementedPrimaryKey("id") - t.column("countryCode", .text) // (2) + t.belongsTo("country", onDelete: .cascade) // (2) .notNull() // (3) .unique() // (4) - .references("country", onDelete: .cascade) // (5) t.column("population", .integer) t.column("density", .double) } } ``` 1. The `country` table has a primary key. -2. The `demographics.countryCode` column is used to link a demographic profile to the country it belongs to. +2. The `demographics.countryCode` column is used to link a demographic profile to the country it belongs to. This column is indexed in order to ease the selection of the demographics of a country.
A foreign key is defined from `demographics.countryCode` column to `country.code`, so that SQLite guarantees that no profile refers to a missing country. The `onDelete: .cascade` option has SQLite automatically delete a profile when its country is deleted. See [Foreign Key Actions] for more information. 3. Make the `demographics.countryCode` column not null if you want SQLite to guarantee that all profiles are linked to a country. 4. Create a unique index on the `demographics.countryCode` column in order to guarantee the unicity of any country's profile. -5. Create a foreign key from `demographics.countryCode` column to `country.code`, so that SQLite guarantees that no profile refers to a missing country. The `onDelete: .cascade` option has SQLite automatically delete a profile when its country is deleted. See [Foreign Key Actions] for more information. The example above uses a string primary key for the "country" table. But generally speaking, all primary keys are supported, including composite primary keys that span several columns. @@ -720,6 +708,22 @@ Sometimes the database schema does not define any foreign key. And sometimes, th ![AmbiguousForeignKeys](https://cdn.rawgit.com/groue/GRDB.swift/master/Documentation/Images/Associations2/AmbiguousForeignKeys.svg) +```swift +// The migration that has created the above schema +migrator.registerMigration("Library") { db in + try db.create(table: "person") { t in + t.autoIncrementedPrimaryKey("id") + t.column("name", .text) + } + try db.create(table: "book") { t in + t.autoIncrementedPrimaryKey("id") + t.belongsTo("author", inTable: "person") + t.belongsTo("translator", inTable: "person") + t.column("title", .text) + } +} +``` + When this happens, associations can't be automatically inferred from the database schema. GRDB will complain with a fatal error such as "Ambiguous foreign key from book to person", or "Could not infer foreign key from book to person". Your help is needed. You have to instruct GRDB which foreign key to use: @@ -2000,7 +2004,7 @@ Each association included in the request can feed a property of the decoded reco - [Decoding a Joined Request with a Decodable Record] - [Decoding a Joined Request with FetchableRecord] - [Debugging Request Decoding] -- [Good Practices for Designing Record Types] - in this general guide about records, check out the "Compose Records" chapter. +- [Recommended Practices for Designing Record Types] - in this general guide about records, check out the "Associations" chapter. ## The Structure of a Joined Request @@ -2657,7 +2661,7 @@ Aggregates can be modified and combined with Swift operators: let request = Team.annotated(with: Team.players.min(Column("score")) ?? 0) ``` -- SQL functions `ABS` and `LENGTH` are available as the `abs` and `length` Swift functions: +- SQL functions `ABS`, `CAST`, and `LENGTH` are available as the `abs`, `cast`, and `length` Swift functions:
SQL @@ -2860,7 +2864,7 @@ extension DerivableRequest { } ``` -See [Good Practices for Designing Record Types] for more information. +See [Recommended Practices for Designing Record Types] for more information. ## Known Issues @@ -2907,8 +2911,6 @@ See [Good Practices for Designing Record Types] for more information. .including(required: Passport.citizen)) ``` -Come [discuss](http://twitter.com/groue) for more information, or if you wish to help turning those missing features into reality. - --- This documentation owns a lot to the [Active Record Associations](http://guides.rubyonrails.org/association_basics.html) guide, which is an immensely well-written introduction to database relations. Many thanks to the Rails team and contributors. @@ -2919,7 +2921,7 @@ This documentation owns a lot to the [Active Record Associations](http://guides. **GRDB** -Copyright (C) 2015-2020 Gwendal Roué +Copyright (C) 2015-2023 Gwendal Roué Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: @@ -2998,10 +3000,10 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. [Isolation of Multiple Aggregates]: #isolation-of-multiple-aggregates [DerivableRequest Protocol]: #derivablerequest-protocol [Known Issues]: #known-issues -[Row Adapters]: ../README.md#row-adapters +[Row Adapters]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter [query interface requests]: ../README.md#requests [TableRecord]: ../README.md#tablerecord-protocol -[Good Practices for Designing Record Types]: GoodPracticesForDesigningRecordTypes.md +[Recommended Practices for Designing Record Types]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices [regular aggregating methods]: ../README.md#fetching-aggregated-values [Record class]: ../README.md#record-class [EncodableRecord]: ../README.md#persistablerecord-protocol diff --git a/Documentation/CommonTableExpressions.md b/Documentation/CommonTableExpressions.md index 6a025315fc..16373db740 100644 --- a/Documentation/CommonTableExpressions.md +++ b/Documentation/CommonTableExpressions.md @@ -362,7 +362,9 @@ And we can fetch the data that feeds our application screen: ```swift -let chatInfos: [ChatInfos] = try dbQueue.read(request.fetchAll) +let chatInfos: [ChatInfos] = try dbQueue.read { db in + try request.fetchAll(db) +} ``` > :bulb: **Tip**: the joining methods are generally type-safe: they won't allow you to join apples to oranges. This works when associations have a *precise* type. In this context, anonymous `CommonTableExpression` CTEs can work against type safety. When you want to define associations between several CTEs, and make sure the compiler will notice wrong uses of those associations, tag your common table expressions with an explicit type: `CommonTableExpression`. diff --git a/Documentation/CustomSQLiteBuilds.md b/Documentation/CustomSQLiteBuilds.md index 7b1dc090b8..c49092d476 100644 --- a/Documentation/CustomSQLiteBuilds.md +++ b/Documentation/CustomSQLiteBuilds.md @@ -3,7 +3,7 @@ Custom SQLite Builds By default, GRDB uses the version of SQLite that ships with the target operating system. 
-**You can build GRDB with a custom build of [SQLite 3.39.3](https://www.sqlite.org/changes.html).** +**You can build GRDB with a custom build of [SQLite 3.44.0](https://www.sqlite.org/changes.html).** A custom SQLite build can activate extra SQLite features, and extra GRDB features as well, such as support for the [FTS5 full-text search engine](../../../#full-text-search), and [SQLite Pre-Update Hooks](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactionobserver). diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.pbxproj b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.pbxproj index 8e7841eab2..ceb3cb987d 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.pbxproj +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.pbxproj @@ -230,6 +230,7 @@ 567C3E0E2520B6DE0011F6E9 /* Project object */ = { isa = PBXProject; attributes = { + BuildIndependentTargetsInParallel = YES; LastSwiftUpdateCheck = 1250; LastUpgradeCheck = 1200; TargetAttributes = { @@ -583,7 +584,7 @@ repositoryURL = "https://github.com/groue/GRDBQuery"; requirement = { kind = upToNextMajorVersion; - minimumVersion = 0.1.0; + minimumVersion = 0.6.0; }; }; /* End XCRemoteSwiftPackageReference section */ diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved index d42d199bf9..5a507d565e 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -5,8 +5,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/groue/GRDBQuery", "state" : { - "revision" : "df514f2bd74548f94e082f3233022190e594fce4", - "version" : "0.5.1" + "revision" : "aefc0d7e7a64e841da33e1c8be7429da1bd9b5d6", + "version" : "0.6.0" } } ], diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/xcshareddata/xcschemes/GRDBAsyncDemo.xcscheme b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/xcshareddata/xcschemes/GRDBAsyncDemo.xcscheme index f62d0a3863..aeb65e487a 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/xcshareddata/xcschemes/GRDBAsyncDemo.xcscheme +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo.xcodeproj/xcshareddata/xcschemes/GRDBAsyncDemo.xcscheme @@ -61,6 +61,13 @@ ReferencedContainer = "container:GRDBAsyncDemo.xcodeproj"> + + + + +/// You create an `AppDatabase` with a connection to an SQLite database +/// (see ). +/// +/// Create those connections with a configuration returned from +/// `AppDatabase/makeConfiguration(_:)`. +/// +/// For example: +/// +/// ```swift +/// // Create an in-memory AppDatabase +/// let config = AppDatabase.makeConfiguration() +/// let dbQueue = try DatabaseQueue(configuration: config) +/// let appDatabase = try AppDatabase(dbQueue) +/// ``` struct AppDatabase { - /// Creates an `AppDatabase`, and make sure the database schema is ready. + /// Creates an `AppDatabase`, and makes sure the database schema + /// is ready. + /// + /// - important: Create the `DatabaseWriter` with a configuration + /// returned by ``makeConfiguration(_:)``. 
init(_ dbWriter: any DatabaseWriter) throws { self.dbWriter = dbWriter try migrator.migrate(dbWriter) @@ -19,18 +36,67 @@ struct AppDatabase { /// /// See private let dbWriter: any DatabaseWriter +} + +// MARK: - Database Configuration + +extension AppDatabase { + private static let sqlLogger = OSLog(subsystem: Bundle.main.bundleIdentifier!, category: "SQL") + /// Returns a database configuration suited for `PlayerRepository`. + /// + /// SQL statements are logged if the `SQL_TRACE` environment variable + /// is set. + /// + /// - parameter base: A base configuration. + public static func makeConfiguration(_ base: Configuration = Configuration()) -> Configuration { + var config = base + + // An opportunity to add required custom SQL functions or + // collations, if needed: + // config.prepareDatabase { db in + // db.add(function: ...) + // } + + // Log SQL statements if the `SQL_TRACE` environment variable is set. + // See + if ProcessInfo.processInfo.environment["SQL_TRACE"] != nil { + config.prepareDatabase { db in + db.trace { + // It's ok to log statements publicly. Sensitive + // information (statement arguments) are not logged + // unless config.publicStatementArguments is set + // (see below). + os_log("%{public}@", log: sqlLogger, type: .debug, String(describing: $0)) + } + } + } + +#if DEBUG + // Protect sensitive information by enabling verbose debugging in + // DEBUG builds only. + // See + config.publicStatementArguments = true +#endif + + return config + } +} + +// MARK: - Database Migrations + +extension AppDatabase { /// The DatabaseMigrator that defines the database schema. /// /// See private var migrator: DatabaseMigrator { var migrator = DatabaseMigrator() - #if DEBUG +#if DEBUG // Speed up development by nuking the database when migrations change // See migrator.eraseDatabaseOnSchemaChange = true - #endif +#endif migrator.registerMigration("createPlayer") { db in // Create a table @@ -52,6 +118,7 @@ struct AppDatabase { } // MARK: - Database Access: Writes +// The write methods execute invariant-preserving database transactions. extension AppDatabase { /// A validation error that prevents some players from being saved into @@ -127,8 +194,8 @@ extension AppDatabase { } } } - - static let uiTestPlayers = [ + + private static let uiTestPlayers = [ Player(id: nil, name: "Arthur", score: 5), Player(id: nil, name: "Barbara", score: 6), Player(id: nil, name: "Craig", score: 8), @@ -137,7 +204,7 @@ extension AppDatabase { Player(id: nil, name: "Frederik", score: 2), Player(id: nil, name: "Gilbert", score: 7), Player(id: nil, name: "Henriette", score: 3)] - + func createPlayersForUITests() throws { try dbWriter.write { db in try AppDatabase.uiTestPlayers.forEach { player in diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Persistence.swift b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Persistence.swift index 7670af424a..05b0115789 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Persistence.swift +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Persistence.swift @@ -7,26 +7,31 @@ extension AppDatabase { private static func makeShared() -> AppDatabase { do { - // Pick a folder for storing the SQLite database, as well as - // the various temporary files created during normal database - // operations (https://sqlite.org/tempfiles.html). 
- let fileManager = FileManager() - let folderURL = try fileManager - .url(for: .applicationSupportDirectory, in: .userDomainMask, appropriateFor: nil, create: true) - .appendingPathComponent("database", isDirectory: true) - + // Apply recommendations from + // + // + // Create the "Application Support/Database" directory if needed + let fileManager = FileManager.default + let appSupportURL = try fileManager.url( + for: .applicationSupportDirectory, in: .userDomainMask, + appropriateFor: nil, create: true) + let directoryURL = appSupportURL.appendingPathComponent("Database", isDirectory: true) + // Support for tests: delete the database if requested if CommandLine.arguments.contains("-reset") { - try? fileManager.removeItem(at: folderURL) + try? fileManager.removeItem(at: directoryURL) } // Create the database folder if needed - try fileManager.createDirectory(at: folderURL, withIntermediateDirectories: true) + try fileManager.createDirectory(at: directoryURL, withIntermediateDirectories: true) - // Connect to a database on disk - // See https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections - let dbURL = folderURL.appendingPathComponent("db.sqlite") - let dbPool = try DatabasePool(path: dbURL.path) + // Open or create the database + let databaseURL = directoryURL.appendingPathComponent("db.sqlite") + NSLog("Database stored at \(databaseURL.path)") + let dbPool = try DatabasePool( + path: databaseURL.path, + // Use default AppDatabase configuration + configuration: AppDatabase.makeConfiguration()) // Create the AppDatabase let appDatabase = try AppDatabase(dbPool) @@ -59,7 +64,7 @@ extension AppDatabase { static func empty() -> AppDatabase { // Connect to an in-memory database // See https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections - let dbQueue = try! DatabaseQueue() + let dbQueue = try! DatabaseQueue(configuration: AppDatabase.makeConfiguration()) return try! AppDatabase(dbQueue) } diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Player.swift b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Player.swift index 8513cd4805..7876b049df 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Player.swift +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemo/Player.swift @@ -69,8 +69,7 @@ extension Player: Codable, FetchableRecord, MutablePersistableRecord { /// Define some player requests used by the application. /// -/// See -/// See +/// See extension DerivableRequest { /// A request of players ordered by name. 
/// diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/AppDatabaseTests.swift b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/AppDatabaseTests.swift index 29de329fea..b736e90774 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/AppDatabaseTests.swift +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/AppDatabaseTests.swift @@ -5,7 +5,7 @@ import GRDB class AppDatabaseTests: XCTestCase { func test_database_schema() throws { // Given an empty database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) // When we instantiate an AppDatabase _ = try AppDatabase(dbQueue) @@ -21,7 +21,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_inserts() async throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we save a new player @@ -35,7 +35,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_updates() async throws { // Given a players database that contains a player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = try await dbQueue.write { db in try Player(id: nil, name: "Arthur", score: 100).inserted(db) @@ -55,7 +55,7 @@ class AppDatabaseTests: XCTestCase { func test_deletePlayers() async throws { // Given a players database that contains four players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) let playerIds: [Int64] = try await dbQueue.write { db in _ = try Player(id: nil, name: "Arthur", score: 100).inserted(db) @@ -83,7 +83,7 @@ class AppDatabaseTests: XCTestCase { func test_deleteAllPlayers() async throws { // Given a players database that contains players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) try await dbQueue.write { db in _ = try Player(id: nil, name: "Arthur", score: 100).inserted(db) @@ -102,7 +102,7 @@ class AppDatabaseTests: XCTestCase { func test_refreshPlayers_populates_an_empty_database() async throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we refresh players @@ -115,7 +115,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_populates_an_empty_database() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we create random players @@ -127,7 +127,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_does_not_modify_a_non_empty_database() throws { // Given a players database that contains one player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = Player(id: nil, name: "Arthur", score: 100) try dbQueue.write { db in diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerRequestTests.swift 
b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerRequestTests.swift index 914a24173b..2675f82d56 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerRequestTests.swift +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerRequestTests.swift @@ -5,7 +5,7 @@ import GRDB class PlayerRequestTests: XCTestCase { func test_PlayerRequest_byName_fetches_well_ordered_players() throws { // Given a players database that contains two players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 1000) @@ -24,7 +24,7 @@ class PlayerRequestTests: XCTestCase { func test_PlayerRequest_byScore_fetches_well_ordered_players() throws { // Given a players database that contains two players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 1000) diff --git a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerTests.swift b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerTests.swift index 3b9df15065..edd8726845 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerTests.swift +++ b/Documentation/DemoApps/GRDBAsyncDemo/GRDBAsyncDemoTests/PlayerTests.swift @@ -8,7 +8,7 @@ class PlayerTests: XCTestCase { func testInsert() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player @@ -23,7 +23,7 @@ class PlayerTests: XCTestCase { func testRoundtrip() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player and fetch the player with the same id @@ -42,7 +42,7 @@ class PlayerTests: XCTestCase { func testOrderedByScore() throws { // Given a players database that contains players with distinct scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -64,7 +64,7 @@ class PlayerTests: XCTestCase { func testOrderedByScoreSortsIdenticalScoresByName() throws { // Given a players database that contains players with common scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -86,7 +86,7 @@ class PlayerTests: XCTestCase { func testOrderedByName() throws { // Given a players database that contains players with distinct names - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) diff --git a/Documentation/DemoApps/GRDBAsyncDemo/README.md b/Documentation/DemoApps/GRDBAsyncDemo/README.md index 
6fc5a85e29..79389fdb21 100644 --- a/Documentation/DemoApps/GRDBAsyncDemo/README.md +++ b/Documentation/DemoApps/GRDBAsyncDemo/README.md @@ -13,8 +13,9 @@ The topics covered in this demo are: - How to setup a database in an iOS app. - How to define a simple [Codable Record](../../../README.md#codable-records). -- How to track database changes and animate a SwiftUI List with an async sequence built from [ValueObservation](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation). -- How to apply the recommendations of [Good Practices for Designing Record Types](../../GoodPracticesForDesigningRecordTypes.md). +- How to track database changes and animate a SwiftUI List with [ValueObservation](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation) Combine publishers. +- How to apply the recommendations of [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices). +- How to perform `async` database accesses. - How to feed SwiftUI previews with a transient database. **Files of interest:** diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.pbxproj b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.pbxproj index ccdf36191e..5435eadedb 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.pbxproj +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.pbxproj @@ -230,6 +230,7 @@ 567C3E0E2520B6DE0011F6E9 /* Project object */ = { isa = PBXProject; attributes = { + BuildIndependentTargetsInParallel = YES; LastSwiftUpdateCheck = 1250; LastUpgradeCheck = 1200; TargetAttributes = { @@ -581,7 +582,7 @@ repositoryURL = "https://github.com/groue/GRDBQuery"; requirement = { kind = upToNextMajorVersion; - minimumVersion = 0.1.0; + minimumVersion = 0.6.0; }; }; /* End XCRemoteSwiftPackageReference section */ diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved index d42d199bf9..5a507d565e 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -5,8 +5,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/groue/GRDBQuery", "state" : { - "revision" : "df514f2bd74548f94e082f3233022190e594fce4", - "version" : "0.5.1" + "revision" : "aefc0d7e7a64e841da33e1c8be7429da1bd9b5d6", + "version" : "0.6.0" } } ], diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/xcshareddata/xcschemes/GRDBCombineDemo.xcscheme b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/xcshareddata/xcschemes/GRDBCombineDemo.xcscheme index 2f53349357..0cbf7bf5c1 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/xcshareddata/xcschemes/GRDBCombineDemo.xcscheme +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo.xcodeproj/xcshareddata/xcschemes/GRDBCombineDemo.xcscheme @@ -61,6 +61,13 @@ ReferencedContainer = "container:GRDBCombineDemo.xcodeproj"> + + + + +/// You create an `AppDatabase` with a connection to an SQLite database +/// (see ). 
+/// +/// Create those connections with a configuration returned from +/// `AppDatabase/makeConfiguration(_:)`. +/// +/// For example: +/// +/// ```swift +/// // Create an in-memory AppDatabase +/// let config = AppDatabase.makeConfiguration() +/// let dbQueue = try DatabaseQueue(configuration: config) +/// let appDatabase = try AppDatabase(dbQueue) +/// ``` struct AppDatabase { - /// Creates an `AppDatabase`, and make sure the database schema is ready. + /// Creates an `AppDatabase`, and makes sure the database schema + /// is ready. + /// + /// - important: Create the `DatabaseWriter` with a configuration + /// returned by ``makeConfiguration(_:)``. init(_ dbWriter: any DatabaseWriter) throws { self.dbWriter = dbWriter try migrator.migrate(dbWriter) @@ -20,18 +36,67 @@ struct AppDatabase { /// /// See private let dbWriter: any DatabaseWriter +} + +// MARK: - Database Configuration + +extension AppDatabase { + private static let sqlLogger = OSLog(subsystem: Bundle.main.bundleIdentifier!, category: "SQL") + /// Returns a database configuration suited for `AppDatabase`. + /// + /// SQL statements are logged if the `SQL_TRACE` environment variable + /// is set. + /// + /// - parameter base: A base configuration. + public static func makeConfiguration(_ base: Configuration = Configuration()) -> Configuration { + var config = base + + // An opportunity to add required custom SQL functions or + // collations, if needed: + // config.prepareDatabase { db in + // db.add(function: ...) + // } + + // Log SQL statements if the `SQL_TRACE` environment variable is set. + // See + if ProcessInfo.processInfo.environment["SQL_TRACE"] != nil { + config.prepareDatabase { db in + db.trace { + // It's OK to log statements publicly. Sensitive + // information (statement arguments) is not logged + // unless config.publicStatementArguments is set + // (see below). + os_log("%{public}@", log: sqlLogger, type: .debug, String(describing: $0)) + } + } + } + +#if DEBUG + // Protect sensitive information by enabling verbose debugging in + // DEBUG builds only. + // See + config.publicStatementArguments = true +#endif + + return config + } +} + +// MARK: - Database Migrations + +extension AppDatabase { /// The DatabaseMigrator that defines the database schema. /// /// See private var migrator: DatabaseMigrator { var migrator = DatabaseMigrator() - #if DEBUG +#if DEBUG // Speed up development by nuking the database when migrations change // See migrator.eraseDatabaseOnSchemaChange = true - #endif +#endif migrator.registerMigration("createPlayer") { db in // Create a table @@ -53,6 +118,7 @@ struct AppDatabase { } // MARK: - Database Access: Writes +// The write methods execute invariant-preserving database transactions.
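For readers skimming the diff, the methods this new comment describes look like the following sketch (modeled on the demo's `savePlayer`; the actual code sits in the unchanged part of the file):

```swift
extension AppDatabase {
    /// Saves (inserts or updates) a player.
    func savePlayer(_ player: inout Player) throws {
        try dbWriter.write { db in
            try player.save(db)
        }
    }
}
```

Each such method wraps its work in a single `dbWriter.write` transaction, which is what keeps the database invariants intact even if one step throws.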
extension AppDatabase { /// A validation error that prevents some players from being saved into @@ -128,8 +194,8 @@ extension AppDatabase { } } } - - static let uiTestPlayers = [ + + private static let uiTestPlayers = [ Player(id: nil, name: "Arthur", score: 5), Player(id: nil, name: "Barbara", score: 6), Player(id: nil, name: "Craig", score: 8), @@ -138,7 +204,7 @@ extension AppDatabase { Player(id: nil, name: "Frederik", score: 2), Player(id: nil, name: "Gilbert", score: 7), Player(id: nil, name: "Henriette", score: 3)] - + func createPlayersForUITests() throws { try dbWriter.write { db in try AppDatabase.uiTestPlayers.forEach { player in diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Persistence.swift b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Persistence.swift index 7670af424a..05b0115789 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Persistence.swift +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Persistence.swift @@ -7,26 +7,31 @@ extension AppDatabase { private static func makeShared() -> AppDatabase { do { - // Pick a folder for storing the SQLite database, as well as - // the various temporary files created during normal database - // operations (https://sqlite.org/tempfiles.html). - let fileManager = FileManager() - let folderURL = try fileManager - .url(for: .applicationSupportDirectory, in: .userDomainMask, appropriateFor: nil, create: true) - .appendingPathComponent("database", isDirectory: true) - + // Apply recommendations from + // + // + // Create the "Application Support/Database" directory if needed + let fileManager = FileManager.default + let appSupportURL = try fileManager.url( + for: .applicationSupportDirectory, in: .userDomainMask, + appropriateFor: nil, create: true) + let directoryURL = appSupportURL.appendingPathComponent("Database", isDirectory: true) + // Support for tests: delete the database if requested if CommandLine.arguments.contains("-reset") { - try? fileManager.removeItem(at: folderURL) + try? fileManager.removeItem(at: directoryURL) } // Create the database folder if needed - try fileManager.createDirectory(at: folderURL, withIntermediateDirectories: true) + try fileManager.createDirectory(at: directoryURL, withIntermediateDirectories: true) - // Connect to a database on disk - // See https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections - let dbURL = folderURL.appendingPathComponent("db.sqlite") - let dbPool = try DatabasePool(path: dbURL.path) + // Open or create the database + let databaseURL = directoryURL.appendingPathComponent("db.sqlite") + NSLog("Database stored at \(databaseURL.path)") + let dbPool = try DatabasePool( + path: databaseURL.path, + // Use default AppDatabase configuration + configuration: AppDatabase.makeConfiguration()) // Create the AppDatabase let appDatabase = try AppDatabase(dbPool) @@ -59,7 +64,7 @@ extension AppDatabase { static func empty() -> AppDatabase { // Connect to an in-memory database // See https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections - let dbQueue = try! DatabaseQueue() + let dbQueue = try! DatabaseQueue(configuration: AppDatabase.makeConfiguration()) return try! 
AppDatabase(dbQueue) } diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Player.swift b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Player.swift index 8513cd4805..7876b049df 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Player.swift +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemo/Player.swift @@ -69,8 +69,7 @@ extension Player: Codable, FetchableRecord, MutablePersistableRecord { /// Define some player requests used by the application. /// -/// See -/// See +/// See extension DerivableRequest { /// A request of players ordered by name. /// diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/AppDatabaseTests.swift b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/AppDatabaseTests.swift index b1f82c5894..f9343671e1 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/AppDatabaseTests.swift +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/AppDatabaseTests.swift @@ -5,7 +5,7 @@ import GRDB class AppDatabaseTests: XCTestCase { func test_database_schema() throws { // Given an empty database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) // When we instantiate an AppDatabase _ = try AppDatabase(dbQueue) @@ -21,7 +21,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_inserts() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we save a new player @@ -34,7 +34,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_updates() throws { // Given a players database that contains a player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = Player(id: nil, name: "Arthur", score: 100) try dbQueue.write { db in @@ -55,7 +55,7 @@ class AppDatabaseTests: XCTestCase { func test_deletePlayers() throws { // Given a players database that contains four players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 200) @@ -83,7 +83,7 @@ class AppDatabaseTests: XCTestCase { func test_deleteAllPlayers() throws { // Given a players database that contains players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 200) @@ -105,7 +105,7 @@ class AppDatabaseTests: XCTestCase { func test_refreshPlayers_populates_an_empty_database() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we refresh players @@ -117,7 +117,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_populates_an_empty_database() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When 
we create random players @@ -129,7 +129,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_does_not_modify_a_non_empty_database() throws { // Given a players database that contains one player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = Player(id: nil, name: "Arthur", score: 100) try dbQueue.write { db in diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerRequestTests.swift b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerRequestTests.swift index 403d325aed..151d8e41ee 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerRequestTests.swift +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerRequestTests.swift @@ -5,7 +5,7 @@ import GRDB class PlayerRequestTests: XCTestCase { func test_PlayerRequest_byName_fetches_well_ordered_players() throws { // Given a players database that contains two players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 1000) @@ -24,7 +24,7 @@ class PlayerRequestTests: XCTestCase { func test_PlayerRequest_byScore_fetches_well_ordered_players() throws { // Given a players database that contains two players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 1000) diff --git a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerTests.swift b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerTests.swift index bb732c60fa..4262d80235 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerTests.swift +++ b/Documentation/DemoApps/GRDBCombineDemo/GRDBCombineDemoTests/PlayerTests.swift @@ -8,7 +8,7 @@ class PlayerTests: XCTestCase { func testInsert() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player @@ -23,7 +23,7 @@ class PlayerTests: XCTestCase { func testRoundtrip() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player and fetch the player with the same id @@ -42,7 +42,7 @@ class PlayerTests: XCTestCase { func testOrderedByScore() throws { // Given a players database that contains players with distinct scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -64,7 +64,7 @@ class PlayerTests: XCTestCase { func testOrderedByScoreSortsIdenticalScoresByName() throws { // Given a players database that contains players with common scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 
100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -86,7 +86,7 @@ class PlayerTests: XCTestCase { func testOrderedByName() throws { // Given a players database that contains players with distinct names - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) diff --git a/Documentation/DemoApps/GRDBCombineDemo/README.md b/Documentation/DemoApps/GRDBCombineDemo/README.md index 5bb1622849..6b2ccec0f7 100644 --- a/Documentation/DemoApps/GRDBCombineDemo/README.md +++ b/Documentation/DemoApps/GRDBCombineDemo/README.md @@ -14,7 +14,7 @@ The topics covered in this demo are: - How to setup a database in an iOS app. - How to define a simple [Codable Record](../../../README.md#codable-records). - How to track database changes and animate a SwiftUI List with [ValueObservation](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation) Combine publishers. -- How to apply the recommendations of [Good Practices for Designing Record Types](../../GoodPracticesForDesigningRecordTypes.md). +- How to apply the recommendations of [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices). - How to feed SwiftUI previews with a transient database. **Files of interest:** diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS Extension/Info.plist b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS Extension/Info.plist index 831251030a..0881b8b30e 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS Extension/Info.plist +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS Extension/Info.plist @@ -25,7 +25,7 @@ NSExtensionAttributes WKAppBundleIdentifier - com.github.groue.GRDBDemoiOS.watchkitapp + com.github.groue.GRDBDemoiOS2.watchkitapp NSExtensionPointIdentifier com.apple.watchkit diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS/Info.plist b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS/Info.plist index 34395e2ee1..0478fa15f8 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS/Info.plist +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoWatchOS/Info.plist @@ -26,7 +26,7 @@ UIInterfaceOrientationPortraitUpsideDown WKCompanionAppBundleIdentifier - com.github.groue.GRDBDemoiOS + com.github.groue.GRDBDemoiOS2 WKWatchKitApp diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/project.pbxproj b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/project.pbxproj index 77396313e0..4d38142d00 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/project.pbxproj +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/project.pbxproj @@ -40,6 +40,20 @@ remoteGlobalIDString = 56B036021E8D9EBE003B6DA4; remoteInfo = GRDBDemoiOS; }; + 5642252F29A2390800D714BF /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 56B036261E8D9F79003B6DA4 /* GRDB.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = DC3773F219C8CBB3004FCF85; + remoteInfo = GRDB; + }; + 5642253329A2391900D714BF /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 56B036261E8D9F79003B6DA4 /* GRDB.xcodeproj */; + proxyType = 1; + remoteGlobalIDString = DC3773F219C8CBB3004FCF85; + remoteInfo = GRDB; + }; 56606D092355931F00185962 /* PBXContainerItemProxy */ = { isa = 
PBXContainerItemProxy; containerPortal = 56B036261E8D9F79003B6DA4 /* GRDB.xcodeproj */; @@ -328,6 +342,7 @@ buildRules = ( ); dependencies = ( + 5642253429A2391900D714BF /* PBXTargetDependency */, ); name = "GRDBDemoWatchOS Extension"; productName = "GRDBDemoWatchOS Extension"; @@ -347,6 +362,7 @@ buildRules = ( ); dependencies = ( + 5642253029A2390800D714BF /* PBXTargetDependency */, 568E5FD81E926430002582E0 /* PBXTargetDependency */, ); name = GRDBDemoiOS; @@ -511,6 +527,16 @@ target = 56B036021E8D9EBE003B6DA4 /* GRDBDemoiOS */; targetProxy = 56185BF525B80B8900B9C30F /* PBXContainerItemProxy */; }; + 5642253029A2390800D714BF /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = GRDB; + targetProxy = 5642252F29A2390800D714BF /* PBXContainerItemProxy */; + }; + 5642253429A2391900D714BF /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + name = GRDB; + targetProxy = 5642253329A2391900D714BF /* PBXContainerItemProxy */; + }; 568E5FCE1E926430002582E0 /* PBXTargetDependency */ = { isa = PBXTargetDependency; target = 568E5FCA1E926430002582E0 /* GRDBDemoWatchOS Extension */; @@ -614,7 +640,7 @@ "@executable_path/Frameworks", "@executable_path/../../Frameworks", ); - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS.watchkitapp.watchkitextension; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2.watchkitapp.watchkitextension; PRODUCT_NAME = "${TARGET_NAME}"; SDKROOT = watchos; SKIP_INSTALL = YES; @@ -634,7 +660,7 @@ "@executable_path/Frameworks", "@executable_path/../../Frameworks", ); - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS.watchkitapp.watchkitextension; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2.watchkitapp.watchkitextension; PRODUCT_NAME = "${TARGET_NAME}"; SDKROOT = watchos; SKIP_INSTALL = YES; @@ -651,7 +677,7 @@ DEVELOPMENT_TEAM = AMD8W895CT; IBSC_MODULE = GRDBDemoWatchOS_Extension; INFOPLIST_FILE = GRDBDemoWatchOS/Info.plist; - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS.watchkitapp; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2.watchkitapp; PRODUCT_NAME = "$(TARGET_NAME)"; SDKROOT = watchos; SKIP_INSTALL = YES; @@ -668,7 +694,7 @@ DEVELOPMENT_TEAM = AMD8W895CT; IBSC_MODULE = GRDBDemoWatchOS_Extension; INFOPLIST_FILE = GRDBDemoWatchOS/Info.plist; - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS.watchkitapp; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2.watchkitapp; PRODUCT_NAME = "$(TARGET_NAME)"; SDKROOT = watchos; SKIP_INSTALL = YES; @@ -806,7 +832,7 @@ "$(inherited)", "@executable_path/Frameworks", ); - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Debug; @@ -823,7 +849,7 @@ "$(inherited)", "@executable_path/Frameworks", ); - PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS; + PRODUCT_BUNDLE_IDENTIFIER = com.github.groue.GRDBDemoiOS2; PRODUCT_NAME = "$(TARGET_NAME)"; }; name = Release; diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/xcshareddata/xcschemes/GRDBDemoiOS.xcscheme b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/xcshareddata/xcschemes/GRDBDemoiOS.xcscheme index 9f0ec648bf..20c4615d25 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/xcshareddata/xcschemes/GRDBDemoiOS.xcscheme +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS.xcodeproj/xcshareddata/xcschemes/GRDBDemoiOS.xcscheme @@ -61,6 +61,13 @@ ReferencedContainer = "container:GRDBDemoiOS.xcodeproj"> + + + + -final class 
AppDatabase { - /// Creates an `AppDatabase`, and make sure the database schema is ready. +/// You create an `AppDatabase` with a connection to an SQLite database +/// (see ). +/// +/// Create those connections with a configuration returned from +/// `AppDatabase/makeConfiguration(_:)`. +/// +/// For example: +/// +/// ```swift +/// // Create an in-memory AppDatabase +/// let config = AppDatabase.makeConfiguration() +/// let dbQueue = try DatabaseQueue(configuration: config) +/// let appDatabase = try AppDatabase(dbQueue) +/// ``` +struct AppDatabase { + /// Creates an `AppDatabase`, and makes sure the database schema + /// is ready. + /// + /// - important: Create the `DatabaseWriter` with a configuration + /// returned by ``makeConfiguration(_:)``. init(_ dbWriter: any DatabaseWriter) throws { self.dbWriter = dbWriter try migrator.migrate(dbWriter) @@ -18,18 +35,67 @@ final class AppDatabase { /// /// See private let dbWriter: any DatabaseWriter +} + +// MARK: - Database Configuration + +extension AppDatabase { + private static let sqlLogger = OSLog(subsystem: Bundle.main.bundleIdentifier!, category: "SQL") + /// Returns a database configuration suited for `AppDatabase`. + /// + /// SQL statements are logged if the `SQL_TRACE` environment variable + /// is set. + /// + /// - parameter base: A base configuration. + public static func makeConfiguration(_ base: Configuration = Configuration()) -> Configuration { + var config = base + + // An opportunity to add required custom SQL functions or + // collations, if needed: + // config.prepareDatabase { db in + // db.add(function: ...) + // } + + // Log SQL statements if the `SQL_TRACE` environment variable is set. + // See + if ProcessInfo.processInfo.environment["SQL_TRACE"] != nil { + config.prepareDatabase { db in + db.trace { + // It's OK to log statements publicly. Sensitive + // information (statement arguments) is not logged + // unless config.publicStatementArguments is set + // (see below). + os_log("%{public}@", log: sqlLogger, type: .debug, String(describing: $0)) + } + } + } + +#if DEBUG + // Protect sensitive information by enabling verbose debugging in + // DEBUG builds only. + // See + config.publicStatementArguments = true +#endif + + return config + } +} + +// MARK: - Database Migrations + +extension AppDatabase { /// The DatabaseMigrator that defines the database schema. /// /// See private var migrator: DatabaseMigrator { var migrator = DatabaseMigrator() - #if DEBUG +#if DEBUG // Speed up development by nuking the database when migrations change // See migrator.eraseDatabaseOnSchemaChange = true - #endif +#endif migrator.registerMigration("createPlayer") { db in // Create a table @@ -51,6 +117,7 @@ final class AppDatabase { } // MARK: - Database Access: Writes +// The write methods execute invariant-preserving database transactions. extension AppDatabase { /// Saves (inserts or updates) a player. When the method returns, the diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Persistence.swift b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Persistence.swift index 0b0c44c33c..96267e6846 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Persistence.swift +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Persistence.swift @@ -7,19 +7,24 @@ extension AppDatabase { private static func makeShared() -> AppDatabase { do { - // Create a folder for storing the SQLite database, as well as - // the various temporary files created during normal database - // operations (https://sqlite.org/tempfiles.html).
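Earlier in this hunk, the `createPlayer` migration is cut off right after `// Create a table`. For context, the table it creates looks roughly like this (columns reconstructed from the `Player(id:name:score:)` initializer used throughout the tests, so treat the details as an approximation):

```swift
migrator.registerMigration("createPlayer") { db in
    // Create a table: player(id, name, score)
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("name", .text).notNull()
        t.column("score", .integer).notNull()
    }
}
```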
- let fileManager = FileManager() - let folderURL = try fileManager - .url(for: .applicationSupportDirectory, in: .userDomainMask, appropriateFor: nil, create: true) - .appendingPathComponent("database", isDirectory: true) - try fileManager.createDirectory(at: folderURL, withIntermediateDirectories: true) + // Apply recommendations from + // + // + // Create the "Application Support/Database" directory if needed + let fileManager = FileManager.default + let appSupportURL = try fileManager.url( + for: .applicationSupportDirectory, in: .userDomainMask, + appropriateFor: nil, create: true) + let directoryURL = appSupportURL.appendingPathComponent("Database", isDirectory: true) + try fileManager.createDirectory(at: directoryURL, withIntermediateDirectories: true) - // Connect to a database on disk - // See https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections - let dbURL = folderURL.appendingPathComponent("db.sqlite") - let dbPool = try DatabasePool(path: dbURL.path) + // Open or create the database + let databaseURL = directoryURL.appendingPathComponent("db.sqlite") + NSLog("Database stored at \(databaseURL.path)") + let dbPool = try DatabasePool( + path: databaseURL.path, + // Use default AppDatabase configuration + configuration: AppDatabase.makeConfiguration()) // Create the AppDatabase let appDatabase = try AppDatabase(dbPool) diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Player.swift b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Player.swift index 7c82fe2cfd..82204b62c4 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Player.swift +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOS/Player.swift @@ -68,8 +68,7 @@ extension Player: Codable, FetchableRecord, MutablePersistableRecord { /// Define some player requests used by the application. /// -/// See -/// See +/// See extension DerivableRequest { /// A request of players ordered by name. 
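The `DerivableRequest` extension that this doc comment introduces lies mostly outside the diff context. A sketch of what such request helpers look like in the demos (method and column names assumed from the schema sketched earlier):

```swift
import GRDB

extension DerivableRequest<Player> {
    /// A request of players ordered by name.
    func orderedByName() -> Self {
        order(Column("name").collating(.localizedCaseInsensitiveCompare))
    }

    /// A request of players ordered by descending score, then by name.
    func orderedByScore() -> Self {
        order(Column("score").desc, Column("name"))
    }
}
```

Because the extension targets `DerivableRequest`, the same helpers compose on plain requests (`Player.all().orderedByName()`) and on associations.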
/// diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/AppDatabaseTests.swift b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/AppDatabaseTests.swift index 9815727ac6..2c359a8988 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/AppDatabaseTests.swift +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/AppDatabaseTests.swift @@ -5,7 +5,7 @@ import GRDB class AppDatabaseTests: XCTestCase { func test_database_schema() throws { // Given an empty database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) // When we instantiate an AppDatabase _ = try AppDatabase(dbQueue) @@ -21,7 +21,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_inserts() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we save a new player @@ -34,7 +34,7 @@ class AppDatabaseTests: XCTestCase { func test_savePlayer_updates() throws { // Given a players database that contains a player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = Player(id: nil, name: "Arthur", score: 100) try dbQueue.write { db in @@ -55,7 +55,7 @@ class AppDatabaseTests: XCTestCase { func test_deletePlayers() throws { // Given a players database that contains four players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 200) @@ -83,7 +83,7 @@ class AppDatabaseTests: XCTestCase { func test_deleteAllPlayers() throws { // Given a players database that contains players - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player1 = Player(id: nil, name: "Arthur", score: 100) var player2 = Player(id: nil, name: "Barbara", score: 200) @@ -105,7 +105,7 @@ class AppDatabaseTests: XCTestCase { func test_refreshPlayers_populates_an_empty_database() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we refresh players @@ -117,7 +117,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_populates_an_empty_database() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) // When we create random players @@ -129,7 +129,7 @@ class AppDatabaseTests: XCTestCase { func test_createRandomPlayersIfEmpty_does_not_modify_a_non_empty_database() throws { // Given a players database that contains one player - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) let appDatabase = try AppDatabase(dbQueue) var player = Player(id: nil, name: "Arthur", score: 100) try dbQueue.write { db in diff --git a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/PlayerTests.swift b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/PlayerTests.swift index 
506f5dfbc6..64d1c1d2b3 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/PlayerTests.swift +++ b/Documentation/DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/PlayerTests.swift @@ -8,7 +8,7 @@ class PlayerTests: XCTestCase { func testInsert() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player @@ -23,7 +23,7 @@ class PlayerTests: XCTestCase { func testRoundtrip() throws { // Given an empty players database - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) // When we insert a player and fetch the player with the same id @@ -42,7 +42,7 @@ class PlayerTests: XCTestCase { func testOrderedByScore() throws { // Given a players database that contains players with distinct scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -64,7 +64,7 @@ class PlayerTests: XCTestCase { func testOrderedByScoreSortsIdenticalScoresByName() throws { // Given a players database that contains players with common scores - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) @@ -86,7 +86,7 @@ class PlayerTests: XCTestCase { func testOrderedByName() throws { // Given a players database that contains players with distinct names - let dbQueue = try DatabaseQueue() + let dbQueue = try DatabaseQueue(configuration: AppDatabase.makeConfiguration()) _ = try AppDatabase(dbQueue) var player1 = Player(id: 1, name: "Arthur", score: 100) var player2 = Player(id: 2, name: "Barbara", score: 200) diff --git a/Documentation/DemoApps/GRDBDemoiOS/README.md b/Documentation/DemoApps/GRDBDemoiOS/README.md index 098f37d796..ab854697c7 100644 --- a/Documentation/DemoApps/GRDBDemoiOS/README.md +++ b/Documentation/DemoApps/GRDBDemoiOS/README.md @@ -14,7 +14,7 @@ The topics covered in this demo are: - How to setup a database in an iOS app. - How to define a simple [Codable Record](../../../README.md#codable-records). - How to track database changes and animate a table view with [ValueObservation](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation). -- How to apply the recommendations of [Good Practices for Designing Record Types](../../GoodPracticesForDesigningRecordTypes.md). +- How to apply the recommendations of [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices). 
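As a concrete illustration of the `ValueObservation` bullet in the README above (variable names are illustrative, and `dbQueue` stands for the app's database connection; the demo wires this into its table view controller):

```swift
import GRDB

// Track the list of players and get fresh values on the main queue
// whenever a write changes them.
let observation = ValueObservation.tracking { db in
    try Player.fetchAll(db)
}
let cancellable = observation.start(
    in: dbQueue,
    onError: { error in
        // Handle the error (the demo would report or assert).
    },
    onChange: { (players: [Player]) in
        // Reload or animate the table view with the fresh players.
    })
```

The returned cancellable must be retained for as long as the observation should keep running.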
**Files of interest:** diff --git a/Documentation/FullTextSearch.md b/Documentation/FullTextSearch.md index b0c2f61a0d..69b1ae757a 100644 --- a/Documentation/FullTextSearch.md +++ b/Documentation/FullTextSearch.md @@ -28,7 +28,6 @@ let books = try Book.fetchAll(db, ``` - **[Choosing the Full-Text Engine](#choosing-the-full-text-engine)** -- **[Enabling FTS5 Support](#enabling-fts5-support)** - **Create Full-Text Virtual Tables**: [FTS3/4](#create-fts3-and-fts4-virtual-tables), [FTS5](#create-fts5-virtual-tables) - **Choosing a Tokenizer**: [FTS3/4](#fts3-and-fts4-tokenizers), [FTS5](#fts5-tokenizers) - **Tokenization**: [FTS3/4](#fts3-and-fts4-tokenization), [FTS5](#fts5-tokenization) @@ -83,8 +82,6 @@ Generally speaking, FTS5 is better than FTS4 which improves on FTS3. But this do - **The location of the indexed text in your database schema.** Only FTS4 and FTS5 support "contentless" and "external content" tables. -- **The SQLite library integrated in your application.** The version of SQLite that ships with iOS, macOS, tvOS and watchOS supports FTS3 and FTS4 out of the box, but not always FTS5. To use FTS5, see [Enabling FTS5 Support](#enabling-fts5-support). - - See [FST3 vs. FTS4](https://www.sqlite.org/fts3.html#differences_between_fts3_and_fts4) and [FTS5 vs. FTS3/4](https://www.sqlite.org/fts5.html#appendix_a) for more differences. > **Note**: In case you were still wondering, it is recommended to read the SQLite documentation: [FTS3 & FTS4](https://www.sqlite.org/fts3.html) and [FTS5](https://www.sqlite.org/fts5.html). @@ -323,40 +320,6 @@ let documents = try Document.filter(Column("content").match(pattern)).fetchAll(d ``` -## Enabling FTS5 Support - -When the FTS3 and FTS4 full-text engines don't suit your needs, you may want to use FTS5. See [Choosing the Full-Text Engine](#choosing-the-full-text-engine) to help you make a decision. - -The version of SQLite that ships with iOS, macOS, tvOS and watchOS does not always support the FTS5 engine. To enable FTS5 support, you'll need to install GRDB with one of those installation techniques: - -1. Use the GRDB.swift CocoaPod with a custom compilation option, as below. It uses the system SQLite, which is compiled with FTS5 support, but only on iOS 11.4+ / macOS 10.13+ / tvOS 11.4+ / watchOS 4.3+: - - ```ruby - pod 'GRDB.swift' - platform :ios, '11.4' # or above - - post_install do |installer| - installer.pods_project.targets.select { |target| target.name == "GRDB.swift" }.each do |target| - target.build_configurations.each do |config| - config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_FTS5" - end - end - end - ``` - - > **Warning**: make sure you use the right platform version! You will get runtime errors on devices with a lower version. - - > **Note**: there used to be a GRDBPlus CocoaPod with pre-enabled FTS5 support. This CocoaPod is deprecated: please switch to the above technique. - -2. Use the GRDB.swift/SQLCipher CocoaPod subspec (see [encryption](../README.md#encryption)): - - ```ruby - pod 'GRDB.swift/SQLCipher' - ``` - -3. Use a [custom SQLite build] and activate the `SQLITE_ENABLE_FTS5` compilation option. - - ## Create FTS5 Virtual Tables **FTS5 full-text tables store and index textual content.** diff --git a/Documentation/GRDB5MigrationGuide.md b/Documentation/GRDB5MigrationGuide.md index 8ff58693f5..797d1099f9 100644 --- a/Documentation/GRDB5MigrationGuide.md +++ b/Documentation/GRDB5MigrationGuide.md @@ -197,7 +197,7 @@ The changes can quite impact your application. 
We'll describe them below, as wel scheduling: .immediate, onError: { error in ... }, onChange: { [weak self] (players: [Player]) in - guard let self = self else { return } + guard let self else { return } self.updateView(players) }) // <- Here the view has already been updated. @@ -397,7 +397,9 @@ let publisher = observation // NEW: GRDB 5 let query: SQL = "UPDATE player SET name = \(name) WHERE id = \(id)" - let (sql, arguments) = try dbQueue.read(query.build) + let (sql, arguments) = try dbQueue.read { db in + try query.build(db) + } print(sql) // prints "UPDATE player SET name = ? WHERE id = ?" print(arguments) // prints ["O'Brien", 42] ``` diff --git a/Documentation/GoodPracticesForDesigningRecordTypes.md b/Documentation/GoodPracticesForDesigningRecordTypes.md index 60f4e73ebb..1bcdbd223b 100644 --- a/Documentation/GoodPracticesForDesigningRecordTypes.md +++ b/Documentation/GoodPracticesForDesigningRecordTypes.md @@ -1,836 +1,4 @@ Good Practices for Designing Record Types ========================================= -This guide aims at helping you leverage the best of GRDB [records] and [associations]. - -Since GRDB sits right between low-level libraries like SQLite itself or [FMDB], and high-level ORM like [Core Data] or [Realm], you may face questions when designing the model layer of your application. - -To support this guide, we'll design a simple library application that lets the user crawl through books and their authors. - -- [Trust SQLite More Than Yourself] -- [Persistable Record Types are Responsible for Their Tables] -- [Record Types Hide Intimate Database Details] -- [Singleton Records] -- [Define Record Requests] -- [Compose Records] -- [How to Design Database Managers] -- [Observe the Database and Refetch when Needed] - - -## Trust SQLite More Than Yourself - -It is important to put things in the right order. An SQLite database stored on one of your user's device is more important than the Swift code that accesses it. When a user installs a new version of your application, all the code may change, but the database remains the same. - -This is why we recommend defining a **robust database schema** even before playing with record types. - -SQLite is a battle-tested database. Even if you don't know it well, and aren't familiar with the SQL language, you are able to take profit from its solid foundation. It is very difficult to corrupt an SQLite database file. And it can make sure that only valid information is persisted on disk. - -This is important because we developers write bugs, and some of them will ship in the wild, affecting the users of our applications. But SQLite will prevent many of those bugs from corrupting our precious users' data. - -For example, if we were to define a [migration] that sets up our library database, made of books and their authors, we could write: - -```swift -var migrator = DatabaseMigrator() - -migrator.registerMigration("createLibrary") { db in - try db.create(table: "author") { t in // (1) - t.autoIncrementedPrimaryKey("id") // (2) - t.column("name", .text).notNull() // (3) - t.column("country", .text) // (4) - } - - try db.create(table: "book") { t in - t.autoIncrementedPrimaryKey("id") - t.column("title", .text).notNull() // (5) - t.column("authorId", .integer) // (6) - .notNull() // (7) - .indexed() // (8) - .references("author", onDelete: .cascade) // (9) - } -} - -try migrator.migrate(dbQueue) -``` - -1. Our database table names follow the [recommended convention]: they are English, singular, and camelCased. 
They look like Swift identifiers: `author`, `book`, `postalAddress`, `httpRequest`. -2. Each author has a unique id. -3. An author must have a name. -4. The country of an author is not always known. -5. A book must have a title. -6. The `book.authorId` column is used to link a book to the author it belongs to. -7. The `book.authorId` column is not null so that SQLite guarantees that all books have an author. -8. The `book.authorId` column is indexed in order to ease the selection of an author's books. -9. We define a foreign key from `book.authorId` column to `authors.id`, so that SQLite guarantees that no book can refer to a missing author. On top of that, the `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions] for more information. - -Thanks to this database schema, you can be confident that no matter how wrong our application goes, it will always process *consistent data*. Even after a hard crash, application will always find the author of any book, all books will have a non-nil title, etc. - -> :bulb: **Tip**: Don't look at your local SQLite database as you look at the JSON you load from a remote server. You can't control the JSON, its format and content: your application must defend itself against wacky servers. But you can control the database. Put the database on your side, make it trustable. Learn about relational databases, and how they can help you guarantee the quality of your application data. Put as much energy as you can in the proper definition of your database schema. -> -> :bulb: **Tip**: Plan early for future versions of your application, and use [migrations]. -> -> :bulb: **Tip**: In the definition of your migrations, define tables and their columns with **strings**: -> -> ```swift -> migrator.registerMigration("createLibrary") { db in -> // RECOMMENDED -> try db.create(table: "author") { t in -> t.autoIncrementedPrimaryKey("id") -> ... -> } -> -> // NOT RECOMMENDED -> try db.create(table: Author.databaseTableName) { t in -> t.autoIncrementedPrimaryKey(Author.Columns.id.name) -> ... -> } -> } -> ``` -> -> By using strings, you make sure that you will not have to change the Swift code of your migrations in the future. Even if author columns eventually change. Even if the Author type eventually gets replaced with another type. Even when your startup eventually pivots and starts selling pet food. A good migration that never changes is easy to test once and for good. A good migration that never changes will run smoothly on all devices in the wild, even if a user upgrades straight from version 1.0 to version 5.0 of your application. -> -> So make sure that migrations don't use application types and values: migrations should talk to the database, only to the database, and use the database language: **strings**. - - -## Persistable Record Types are Responsible for Their Tables - -Define one record type per database table, and make it adopt a [PersistableRecord] protocol. - -In this sample code, we'll use Codable structs, but there are [other ways](../README.md#examples-of-record-definitions) to define records. - -```swift -struct Author: Codable, Identifiable { - var id: Int64? - var name: String - var country: String? -} - -struct Book: Codable, Identifiable { - var id: Int64? - var authorId: Int64 - var title: String -} -``` - -We add database powers to our types with [record protocols]. 
Since our records use auto-incremented ids, we provide an implementation of the `didInsert` method: - -```swift -// Add Database access - -extension Author: FetchableRecord, MutablePersistableRecord { - // Update auto-incremented id upon successful insertion - mutating func didInsert(_ inserted: InsertionSuccess) { - id = inserted.rowID - } -} - -extension Book: FetchableRecord, MutablePersistableRecord { - // Update auto-incremented id upon successful insertion - mutating func didInsert(_ inserted: InsertionSuccess) { - id = inserted.rowID - } -} -``` - -That's it. The `Author` type can read and write in the `author` database table. `Book` as well, in `book`. - -> :bulb: **Tip**: When a column of a database table can't be NULL, store it in a non-optional property of your record type. On the other side, when the database may contain NULL, define an optional property. -> -> :bulb: **Tip**: When a database table uses an auto-incremented identifier, make the `id` property optional (so that you can instantiate a record before it gets inserted and gains an id), and implement the `didInsert(_:)` method: -> -> ```swift -> try dbQueue.write { db in -> var author = Author(id: nil, name: "Hermann Melville", country: "United States") -> try author.insert(db) -> print(author.id!) // Guaranteed non-nil id -> } -> ``` -> -> :bulb: **Tip**: When the database table has a single-column primary key, have your record type adopt the standard [Identifiable] protocol. This allows GRDB to define type-safe id-related methods: -> -> ```swift -> let authorID: Int64 = ... -> let author: Author? = try dbQueue.read { db in -> try Author.fetchOne(db, id: authorID) -> } -> ``` - -Now that `Author` and `Book` can read and write in their own database tables, they are responsible for it. Make sure each record type deals with one database table, and only one database table! - - -## Record Types Hide Intimate Database Details - -The application uses `Book` and `Author` as regular structs, using their properties. Each of those properties matches a column in the database (`Book.title`, `Author.id`), and is defined with a Swift type that is natively supported by SQLite (`String`, `Int`, etc.) - -Sometimes, it happens that raw database column names and types are not a very good fit for the application. - -Let's look at three examples: - -1. Authors write books, and more specifically novels, poems, essays, or theatre plays. Let's add a `kind` column in the database. For easy debugging of the database contents, a book kind is represented as a string ("novel", "essay", etc.): - - ```swift - try db.create(table: "book") { t in - ... - t.column("kind", .text).notNull() - } - ``` - - In Swift, it is not a good practice to use `String` for the type of the `kind` property. We want an enum instead: - - ```swift - struct Book: Codable { - enum Kind: String, Codable { - case novel, poems, essay, play - } - var id: Int64? - var authorId: Int64 - var title: String - var kind: Kind - } - ``` - - In order to make it possible to use `Book.Kind` in book requests (see [Define Record Requests] below), we add this conformance: - - ```swift - extension Book.Kind: DatabaseValueConvertible { } - ``` - - > :bulb: Records can pick the best suited [Value] type for their column properties (Bool, Int, String, Date, etc.) Thanks to its enum property, the `Book` record prevents unknown book kinds from entering the database. - -2. GPS coordinates are usually stored in two distinct `latitude` and `longitude` columns. 
But the standard way to deal with such coordinate is a single `CLLocationCoordinate2D` struct. - - When this happens, keep column properties private, and provide sensible accessors instead: - - ```swift - struct Place: Codable { - var id: Int64? - var name: String - private var latitude: CLLocationDegrees - private var longitude: CLLocationDegrees - - var coordinate: CLLocationCoordinate2D { - get { - CLLocationCoordinate2D(latitude: latitude, longitude: longitude) - } - set { - latitude = newValue.latitude - longitude = newValue.longitude - } - } - } - ``` - - > :bulb: Private properties make it possible to hide raw columns from the rest of the application. - -3. The record below exposes a `price: Decimal` property ($12.00), backed by an integer column that stores a quantity of cents (1200). An integer column is preferred because it allows SQLite to compute exact sums of prices. - - ```swift - struct Product: Codable { - var id: Int64? - var name: String - private var priceCents: Int - - var price: Decimal { - get { Decimal(priceCents) / 100 } - set { priceCents = NSDecimalNumber(decimal: newValue * 100).intValue } - } - } - ``` - - > :bulb: Private properties allow records to choose both their best database representation, and at the same time, their best Swift interface. - -**Generally speaking**, record types are the dedicated place, in your code, where you can transform raw database values into well-suited types that the rest of the application will enjoy. When needed, you can even [validate values](../README.md#persistence-callbacks) before they enter the database. - - -## Singleton Records - -**Singleton Records** are records that store configuration values, user preferences, and generally some global application state. They are backed by a database table that contains a single row. - -The recommended setup for such records is described in the [Single-Row Tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/singlerowtables) guide. Go check it, and come back when you're done! - - -## Define Record Requests - -Now that we have record types that are able to read and write in the database, we'd like to put them to good use. - -> :bulb: **Tip**: Define an enumeration of columns that you will use in order to filter, sort, etc. - -When your record type is a [Codable Record], derive columns from the [CodingKeys] enum: - -```swift -// For a codable record -extension Author { - // Define database columns from CodingKeys - enum Columns { - static let id = Column(CodingKeys.id) - static let name = Column(CodingKeys.name) - static let country = Column(CodingKeys.country) - } -} -``` - -Otherwise, declare a plain `String` enum that conforms to the `ColumnExpression` protocol: - -```swift -// For a non-codable record -extension Author { - // Define database columns as an enum - enum Columns: String, ColumnExpression { - case id, name, country - } -} -``` - -Those columns let you define requests: - -```swift -try dbQueue.read { db in - // Order authors by name, in a localized case-insensitive fashion - let sortedAuthors: [Author] = try Author.all() - .order(Author.Columns.name.collating(.localizedCaseInsensitiveCompare)) - .fetchAll(db) - - // French authors - let frenchAuthors: [Author] = try Author.all() - .filter(Author.Columns.country == "France") - .fetchAll(db) -} -``` - -> :bulb: **Tip**: Define commonly used requests in a constrained extension of the `DerivableRequest` protocol. 
-
-**Generally speaking**, record types are the dedicated place in your code where you can transform raw database values into well-suited types that the rest of the application will enjoy. When needed, you can even [validate values](../README.md#persistence-callbacks) before they enter the database.
-
-
-## Singleton Records
-
-**Singleton Records** are records that store configuration values, user preferences, and generally some global application state. They are backed by a database table that contains a single row.
-
-The recommended setup for such records is described in the [Single-Row Tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/singlerowtables) guide. Go check it, and come back when you're done!
-
-
-## Define Record Requests
-
-Now that we have record types that are able to read and write in the database, we'd like to put them to good use.
-
-> :bulb: **Tip**: Define an enumeration of columns that you will use in order to filter, sort, etc.
-
-When your record type is a [Codable Record], derive columns from the [CodingKeys] enum:
-
-```swift
-// For a codable record
-extension Author {
-    // Define database columns from CodingKeys
-    enum Columns {
-        static let id = Column(CodingKeys.id)
-        static let name = Column(CodingKeys.name)
-        static let country = Column(CodingKeys.country)
-    }
-}
-```
-
-Otherwise, declare a plain `String` enum that conforms to the `ColumnExpression` protocol:
-
-```swift
-// For a non-codable record
-extension Author {
-    // Define database columns as an enum
-    enum Columns: String, ColumnExpression {
-        case id, name, country
-    }
-}
-```
-
-Those columns let you define requests:
-
-```swift
-try dbQueue.read { db in
-    // Order authors by name, in a localized case-insensitive fashion
-    let sortedAuthors: [Author] = try Author.all()
-        .order(Author.Columns.name.collating(.localizedCaseInsensitiveCompare))
-        .fetchAll(db)
-
-    // French authors
-    let frenchAuthors: [Author] = try Author.all()
-        .filter(Author.Columns.country == "France")
-        .fetchAll(db)
-}
-```
-
-> :bulb: **Tip**: Define commonly used requests in a constrained extension of the `DerivableRequest` protocol.
-
-When you find yourself building similar requests over and over in your application, you may want to define a reusable and composable request vocabulary. This will avoid repetition in your app, ease refactoring, and enable [testability](DemoApps/GRDBDemoiOS/GRDBDemoiOSTests/PlayerTests.swift).
-
-To do so, extend the `DerivableRequest` protocol. It generally lets you filter, sort, leverage associations (we'll talk about associations in the [Compose Records] chapter below), and more:
-
-```swift
-// Author requests
-extension DerivableRequest<Author> {
-    /// Order authors by name, in a localized case-insensitive fashion
-    func orderByName() -> Self {
-        let name = Author.Columns.name
-        return order(name.collating(.localizedCaseInsensitiveCompare))
-    }
-
-    /// Filters authors from a country
-    func filter(country: String) -> Self {
-        filter(Author.Columns.country == country)
-    }
-
-    /// Filters authors with at least one book
-    func havingBooks() -> Self {
-        having(Author.books.isEmpty == false)
-    }
-}
-```
-
-Those methods encapsulate intimate database details, and allow you to compose database requests in a fluent and legible style:
-
-```swift
-try dbQueue.read { db in
-    let sortedAuthors: [Author] = try Author.all()
-        .orderByName()
-        .fetchAll(db)
-
-    let frenchAuthors: [Author] = try Author.all()
-        .filter(country: "France")
-        .fetchAll(db)
-
-    let sortedSpanishAuthorsHavingBooks: [Author] = try Author.all()
-        .filter(country: "Spain")
-        .havingBooks()
-        .orderByName()
-        .fetchAll(db)
-}
-```
-
-Because they are defined in an extension of the `DerivableRequest` protocol, our customized methods can decorate both requests and associations. See how the implementation of `filter(authorCountry:)` for books, below, uses the `filter(country:)` defined for authors:
-
-```swift
-// Book requests
-extension DerivableRequest<Book> {
-    /// Filters books by kind
-    func filter(kind: Book.Kind) -> Self {
-        filter(Book.Columns.kind == kind)
-    }
-
-    /// Filters books from a country
-    func filter(authorCountry: String) -> Self {
-        // Books do not have any country column. But their author has one.
-        // A book is from a country if it can be joined to an author from this country:
-        joining(required: Book.author.filter(country: authorCountry))
-    }
-}
-
-try dbQueue.read { db in
-    let italianNovels: [Book] = try Book.all()
-        .filter(kind: .novel)
-        .filter(authorCountry: "Italy")
-        .fetchAll(db)
-}
-```
-
-Extensions to the `DerivableRequest` protocol cannot change the type of requests. This has to be expressed in an extension of `QueryInterfaceRequest`. For example:
-
-```swift
-// Author requests
-extension QueryInterfaceRequest<Author> {
-    // Selects author ids
-    func selectId() -> QueryInterfaceRequest<Int64> {
-        selectPrimaryKey(as: Int64.self)
-    }
-}
-
-// IDs of French authors
-let ids: [Int64] = try Author.all().filter(country: "France").selectId().fetchAll(db)
-```
-
-## Compose Records
-
-We'd like to navigate from books to authors and vice-versa: we may want to know an author's books, or a book's author.
-
-GRDB can help you do this with [associations]. In our case, each author **has many** books, and each book **belongs to** its author. Here is how you define those associations:
-
-```swift
-extension Author {
-    static let books = hasMany(Book.self)
-}
-
-extension Book {
-    static let author = belongsTo(Author.self)
-}
-```
-
-Thanks to this setup, you can fetch associated records, or compute aggregated values from them.
-For example:
-
-```swift
-// Fetch all authors and their number of books
-struct AuthorInfo: Decodable, FetchableRecord {
-    var author: Author
-    var bookCount: Int
-}
-let authorInfos: [AuthorInfo] = try dbQueue.read { db in
-    let request = Author.annotated(with: Author.books.count)
-    return try AuthorInfo.fetchAll(db, request)
-}
-
-// Fetch all Colombian books and their authors:
-struct Authorship: Decodable, FetchableRecord {
-    var book: Book
-    var author: Author
-}
-let authorships: [Authorship] = try dbQueue.read { db in
-    let request = Book.including(required: Book.author.filter(country: "Colombia"))
-    return try Authorship.fetchAll(db, request)
-}
-```
-
-As in the sample code above, requests that feed from several associated records will often have you define extra record types, such as `AuthorInfo` and `Authorship`. Those extra record types are designed to decode the rows fetched by such requests. The names and types of their properties follow the conventions defined by [associations]. Make them conform to the Decodable and FetchableRecord protocols so that they can decode database rows with ease.
-
-Unlike the primitive persistable record types `Author` and `Book`, those records cannot write in the database. They are simple data types, passive views on the database content. Remember, only [Persistable Record Types are Responsible for Their Tables].
-
-> **Note**: The example `AuthorInfo` and `Authorship` types above may look superfluous to you. After all, other ORMs out there are able to navigate in complex graphs of records without much fuss, aren't they?
->
-> That is because other ORMs perform lazy loading:
->
-> ```ruby
-> # Ruby's Active Record
-> author = Author.find(123) # fetch author
-> book_count = author.books.count # lazily counts books on demand
-> ```
->
-> GRDB does not perform lazy loading. Lazy loading requires either that records be *managed* (as in [Core Data] and [Realm]), or that all data processing happens in a *single function* (think of an HTTP request handled with a web-oriented ORM like [Active Record] and [Django]). The underlying issue is *data consistency*: you always want your memory objects to accurately represent your application data, without any glitch, ever. This involves the subtle database concept of [isolation] against concurrent changes. In a GUI application, this can't be achieved without very complex record management, and non-trivial pain points for the application developer.
->
-> This is why GRDB has removed lazy loading from the list of desirable features. Instead, it provides the tooling needed to fetch data, even complex graphs, in a single and safe stroke. See the "Solving Problems" chapter of [Why Adopt GRDB?](WhyAdoptGRDB.md#solving-problems) for more information.
-
-Equipped with primitive and derived record types, your application will load the data it needs, at the moment it needs it, as below:
-
-1. Prepare the application screen that lists all authors:
-
-    ```swift
-    let authors: [Author] = try dbQueue.read { db in
-        try Author.all().orderByName().fetchAll(db)
-    }
-    ```
-
-2. Prepare the application screen that displays an author and her books:
-
-    ```swift
-    struct AuthorInfo: Codable, FetchableRecord {
-        var author: Author
-        var books: [Book]
-    }
-    let authorId = 123
-    let authorInfo: AuthorInfo? = try dbQueue.read { db in
-        let request = Author
-            .filter(id: authorId)
-            .including(all: Author.books)
-        return try AuthorInfo.fetchOne(db, request)
-    }
-    ```
-3. Prepare the application screen that displays a book's information:
-
-    ```swift
-    struct BookInfo: Decodable, FetchableRecord {
-        var book: Book
-        var author: Author
-    }
-    let bookId = 123
-    let bookInfo: BookInfo? = try dbQueue.read { db in
-        let request = Book
-            .filter(id: bookId)
-            .including(required: Book.author)
-        return try BookInfo.fetchOne(db, request)
-    }
-    ```
-
-> :bulb: **Tip**: Identify the various **graphs of objects** needed by the various parts of your application. Design them independently, by composing primitive record types linked by associations. Fetch the data your application needs, at the moment it needs it, no more, no less.
-
-One last extension on your record types will further help navigation from records to associated ones:
-
-```swift
-extension Author {
-    /// The request for the author's books
-    var books: QueryInterfaceRequest<Book> {
-        request(for: Author.books)
-    }
-}
-
-extension Book {
-    /// The request for the author of the book
-    var author: QueryInterfaceRequest<Author> {
-        request(for: Book.author)
-    }
-}
-```
-
-Those properties provide an alternative way to feed our application:
-
-1. Prepare the application screen that displays an author and her books:
-
-    ```swift
-    struct AuthorInfo {
-        var author: Author
-        var books: [Book]
-    }
-    let authorId = 123
-    let authorInfo: AuthorInfo? = try dbQueue.read { db in
-        guard let author = try Author.fetchOne(db, id: authorId) else {
-            return nil
-        }
-        let books = try author.books.fetchAll(db)
-        return AuthorInfo(
-            author: author,
-            books: books)
-    }
-    ```
-
-2. Prepare the application screen that displays a book's information:
-
-    ```swift
-    struct BookInfo {
-        var book: Book
-        var author: Author
-    }
-    let bookId = 123
-    let bookInfo: BookInfo? = try dbQueue.read { db in
-        guard let book = try Book.fetchOne(db, id: bookId) else {
-            return nil
-        }
-        guard let author = try book.author.fetchOne(db) else {
-            return nil
-        }
-        return BookInfo(book: book, author: author)
-    }
-    ```
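-
-These association requests compose with the `DerivableRequest` methods defined earlier. Here is a minimal sketch that combines `author.books` with the `filter(kind:)` method from the [Define Record Requests] chapter:
-
-```swift
-// Fetch the novels of a given author, in a single database read.
-let novels: [Book] = try dbQueue.read { db in
-    guard let author = try Author.fetchOne(db, id: 123) else {
-        return []
-    }
-    return try author.books.filter(kind: .novel).fetchAll(db)
-}
-```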
-
-
-## How to Design Database Managers
-
-Many developers want to hide GRDB database queues and pools inside "database managers":
-
-```swift
-// LibraryManager grants access to the library database.
-class LibraryManager {
-    private let dbQueue: DatabaseQueue
-    init(dbQueue: DatabaseQueue) {
-        self.dbQueue = dbQueue
-    }
-}
-```
-
-> :bulb: **Tip**: Don't let your database managers create their own databases. Instead, give them a database created by, say, the ApplicationDelegate. This will allow you to efficiently test the database manager with an in-memory database, for example.
-
-Design your database managers with the [GRDB concurrency rules] in mind.
-
-Practically, let's start with a naive example, and gradually improve it:
-
-```swift
-// A naive manager that we will improve
-class NaiveLibraryManager {
-    private let dbQueue: DatabaseQueue
-    init(dbQueue: DatabaseQueue) {
-        self.dbQueue = dbQueue
-    }
-
-    func author(id: Int64) -> Author? {
-        do {
-            return try dbQueue.read { db in
-                try Author.fetchOne(db, id: id)
-            }
-        } catch {
-            return nil
-        }
-    }
-
-    func book(id: Int64) -> Book? {
-        do {
-            return try dbQueue.read { db in
-                try Book.fetchOne(db, id: id)
-            }
-        } catch {
-            return nil
-        }
-    }
-
-    func books(writtenBy author: Author) -> [Book] {
-        do {
-            return try dbQueue.read { db in
-                try author.books.fetchAll(db)
-            }
-        } catch {
-            return []
-        }
-    }
-}
-```
-
-**This manager can be improved in two ways.**
-
-- [Embrace Errors]
-- [Thread-Safety is also an Application Concern]
-
-### Embrace Errors
-
-Have database managers throw database errors instead of catching them.
-
-Consider Apple's [CNContactStore](https://developer.apple.com/documentation/contacts/cncontactstore), for example. Does it hide errors when you fetch or save address book contacts? No, it does not. Keychain, Media assets, File system, Core Data? No, they do not hide errors either. Follow the practices of Apple engineers: do not hide errors :muscle:
-
-Exposing errors will help you build your application:
-
-- You will be able to inspect errors during development, and fix bugs. `do { ... } catch { print(error) }` will save you hours of clueless questioning.
-- You will be able to opt in to advanced OS features like [data protection].
-
-> :bulb: **Tip**: Don't hide database errors. Let the application handle them, because only the application can decide how to handle them.
-
-This gives the improved manager below. And it has less code, which means fewer bugs :bowtie:
-
-```swift
-// An improved manager that does not hide errors
-class ImprovedLibraryManager {
-    private let dbQueue: DatabaseQueue
-    init(dbQueue: DatabaseQueue) {
-        self.dbQueue = dbQueue
-    }
-
-    func author(id: Int64) throws -> Author? {
-        try dbQueue.read { db in
-            try Author.fetchOne(db, id: id)
-        }
-    }
-
-    func book(id: Int64) throws -> Book? {
-        try dbQueue.read { db in
-            try Book.fetchOne(db, id: id)
-        }
-    }
-
-    func books(writtenBy author: Author) throws -> [Book] {
-        try dbQueue.read { db in
-            try author.books.fetchAll(db)
-        }
-    }
-}
-```
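-
-At the call site, handling might look like the minimal sketch below (it assumes a `libraryManager` and an `author` in scope). `DatabaseError` exposes the SQLite result code and message, so the application can decide what to do:
-
-```swift
-do {
-    let books = try libraryManager.books(writtenBy: author)
-    // Update the UI with books
-} catch let error as DatabaseError {
-    // During development, printing the error saves hours of clueless questioning.
-    print("Database error \(error.resultCode): \(error.message ?? "unknown")")
-} catch {
-    print("Unexpected error: \(error)")
-}
-```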
-
-
-### Thread-Safety is also an Application Concern
-
-Now, let's make our database manager **thread-safe**.
-
-This one is more subtle. In order to understand what is wrong in our naive manager, one has to consider how it is used by the application.
-
-For example, in the screen that displays an author and her books, we would write:
-
-```swift
-let authorId = 123
-if let author = libraryManager.author(id: authorId) {
-    let books = libraryManager.books(writtenBy: author)
-    // Use author and books
-}
-```
-
-This code is not thread-safe, because other application threads may have modified the database between the two database accesses. You may end up with an author without any book, and this sure does not make a pretty application screen.
-
-Such bugs are hard to reproduce. Sometimes your application will refresh the library content from the network, and delete an author right at the wrong time. The more users your application has, the more users will see weird screens. And of course, you'll be bitten right on the day of the demo in front of the boss.
-
-Fortunately, GRDB has all the tools you need to prevent such nasty data races:
-
-> :bulb: **Tip**: Make sure you fetch all the data your application needs at a given moment of time, in a **single database read**.
-
-This gives a much safer manager:
-
-```swift
-// A manager that actually manages
-class LibraryManager {
-    private let dbQueue: DatabaseQueue
-    init(dbQueue: DatabaseQueue) {
-        self.dbQueue = dbQueue
-    }
-}
-
-// Feeds the list of authors
-extension LibraryManager {
-    struct AuthorListItem: Decodable, FetchableRecord {
-        let author: Author
-        let bookCount: Int
-    }
-
-    func authorList() throws -> [AuthorListItem] {
-        try dbQueue.read { db in
-            let request = Author
-                .annotated(with: Author.books.count)
-                .orderByName()
-            return try AuthorListItem.fetchAll(db, request)
-        }
-    }
-}
-
-// Feeds a book screen
-extension LibraryManager {
-    struct BookInfo {
-        var book: Book
-        var author: Author
-    }
-
-    func bookInfo(bookId: Int64) throws -> BookInfo? {
-        try dbQueue.read { db in
-            guard let book = try Book.fetchOne(db, id: bookId) else {
-                return nil
-            }
-            guard let author = try book.author.fetchOne(db) else {
-                return nil
-            }
-            return BookInfo(book: book, author: author)
-        }
-    }
-}
-
-// Feeds an author screen
-extension LibraryManager {
-    struct AuthorInfo {
-        var author: Author
-        var books: [Book]
-    }
-
-    func authorInfo(authorId: Int64) throws -> AuthorInfo? {
-        try dbQueue.read { db in
-            guard let author = try Author.fetchOne(db, id: authorId) else {
-                return nil
-            }
-            let books = try author.books.fetchAll(db)
-            return AuthorInfo(author: author, books: books)
-        }
-    }
-}
-```
-
-The `AuthorListItem`, `BookInfo`, `AuthorInfo` types returned by the manager are designed to feed your views.
-
-When a new screen is added to your application, and you want it to display **consistent data** free from any data race, update the manager if needed. The rule is very simple: consumed data must come from a **single** database access method (`dbQueue.read`, `write`, etc.), or [ValueObservation].
-
-In other words: since GRDB is an unmanaged ORM, some amount of management must be imported into your application in order to make it fully thread-safe.
-
-> **Note**: Wrapping several fetches in a single `read` method may look like an inconvenience to you. After all, other ORMs don't require that much ceremony:
->
-> ```ruby
-> # Ruby's Active Record
-> book = Book.find(123) # fetch book
-> author = book.author # fetch author
-> # use book and author
-> ```
->
-> The problem is that it is very hard to guarantee that you will fetch an author after you have fetched a book, despite the constraints of the database schema. One has to perform subsequent fetches in the proper [isolation] level, so that potential concurrent writes that modify the database cannot mess with subsequent requests.
->
-> This isolation can be achieved with record management, as in [Core Data] or [Realm], which target long-running multi-threaded applications. On the other hand, most web-oriented ORMs rely on short-lived database transactions, so that each HTTP request can be processed independently of others.
->
-> GRDB is not a managed ORM. It thus has to use the same isolation techniques as web-oriented ORMs. But unlike web-oriented ORMs, GRDB can't provide implicit isolation: the application must decide when it wants to safely read information in the database, and this decision is made explicit, in your application code, with database access methods such as `dbQueue.read`.
->
-> See the [Concurrency Guide] for detailed information, and the "Solving Problems" chapter of [Why Adopt GRDB?](WhyAdoptGRDB.md#solving-problems) for more rationale.
-
-
-## Observe the Database and Refetch when Needed
-
-We have seen above that the primitive Author and Book record types are [responsible](#persistable-record-types-are-responsible-for-their-tables) for their own database tables. Later, we built [requests](#define-record-requests) and [composed](#compose-records) records into more complex ones such as BookInfo or AuthorInfo. We have shown how [database managers](#how-to-design-database-managers) should expose database content to the rest of the application.
-
-Database content that has been fetched into memory eventually becomes obsolete, as the application modifies the database.
-
-It is up to the application to decide how long it should keep fetched information in memory. Very often, though, the application will want to keep in-memory information synchronized with the database content.
-
-This synchronization is not automatic with GRDB: records do not "auto-update". That is because applications do not always want this feature, and because it is difficult to write correct multi-threaded applications when values can change in unexpected ways.
-
-Instead, have a look at [Database Observation]:
-
-> :bulb: **Tip**: [ValueObservation] performs automated tracking of database changes.
->
-> :bulb: **Tip**: [Combine Support] allows automated tracking of database changes, in the [Combine](https://developer.apple.com/documentation/combine) way.
->
-> :bulb: **Tip**: [RxGRDB] performs automated tracking of database changes, in the [RxSwift](https://github.com/ReactiveX/RxSwift) way.
->
-> :bulb: **Tip**: [TransactionObserver] provides low-level database observation, for your most advanced needs.
->
-> :bulb: **Tip**: Don't try to write complex methods that modify both the database and in-memory values at the same time. Instead, modify the database with plain record types, and rely on database observation for automatically refreshing values, even complex ones.
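-
-A minimal sketch of [ValueObservation], assuming the `orderByName()` request method defined above:
-
-```swift
-// Track the author list, and get notified of every change.
-let observation = ValueObservation.tracking { db in
-    try Author.all().orderByName().fetchAll(db)
-}
-
-// The observation lasts until the returned cancellable is
-// cancelled or deinitialized.
-let cancellable = observation.start(
-    in: dbQueue,
-    onError: { error in print("Observation failed: \(error)") },
-    onChange: { authors in print("Fresh authors: \(authors)") })
-```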
-
-[records]: ../README.md#records
-[associations]: AssociationsBasics.md
-[FMDB]: https://github.com/ccgus/fmdb
-[Core Data]: https://developer.apple.com/documentation/coredata
-[Realm]: https://realm.io
-[Active Record]: http://guides.rubyonrails.org/active_record_basics.html
-[Django]: https://docs.djangoproject.com/en/2.0/topics/db/
-[record protocols]: ../README.md#record-protocols-overview
-[Separation of Concerns]: https://en.wikipedia.org/wiki/Separation_of_concerns
-[Single Source of Truth]: https://en.wikipedia.org/wiki/Single_source_of_truth
-[Divide and Conquer]: https://en.wikipedia.org/wiki/Divide_and_rule
-[Why Adopt GRDB?]: WhyAdoptGRDB.md
-[isolation]: https://en.wikipedia.org/wiki/Isolation_(database_systems)
-[migrations]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/migrations
-[migration]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/migrations
-[Foreign Key Actions]: https://sqlite.org/foreignkeys.html#fk_actions
-[Concurrency Guide]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency
-[GRDB concurrency rules]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency
-[PersistableRecord]: ../README.md#persistablerecord-protocol
-[Database Observation]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseobservation
-[ValueObservation]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation
-[RxGRDB]: https://github.com/RxSwiftCommunity/RxGRDB
-[TransactionObserver]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactionobserver
-[Trust SQLite More Than Yourself]: #trust-sqlite-more-than-yourself
-[Persistable Record Types are Responsible for Their Tables]: #persistable-record-types-are-responsible-for-their-tables
-[Record Types Hide Intimate Database Details]: #record-types-hide-intimate-database-details
-[Singleton Records]: #singleton-records
-[Define Record Requests]: #define-record-requests
-[Compose Records]: #compose-records
-[How to Design Database Managers]: #how-to-design-database-managers
-[Observe the Database and Refetch when Needed]: #observe-the-database-and-refetch-when-needed
-[query interface]: ../README.md#the-query-interface
-[observe database changes]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseobservation
-[data protection]: ../README.md#data-protection
-[Embrace Errors]: #embrace-errors
-[Thread-Safety is also an Application Concern]: #thread-safety-is-also-an-application-concern
-[recommended convention]: AssociationsBasics.md#associations-and-the-database-schema
-[Association Aggregates]: AssociationsBasics.md#association-aggregates
-[Codable Record]: ../README.md#codable-records
-[CodingKeys]: https://developer.apple.com/documentation/foundation/archives_and_serialization/encoding_and_decoding_custom_types
-[Combine Support]: Combine.md
-[Value]: ../README.md#values
-[Identifiable]: https://developer.apple.com/documentation/swift/identifiable
+This guide [has moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices).
diff --git a/Documentation/Playgrounds/Associations.playground/Contents.swift b/Documentation/Playgrounds/Associations.playground/Contents.swift index be4015f981..eb8c84ab04 100644 --- a/Documentation/Playgrounds/Associations.playground/Contents.swift +++ b/Documentation/Playgrounds/Associations.playground/Contents.swift @@ -29,10 +29,7 @@ migrator.registerMigration("createLibrary") { db in try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") t.column("title", .text).notNull() - t.column("authorId", .integer) - .notNull() - .indexed() - .references("author", onDelete: .cascade) + t.belongsTo("author", onDelete: .cascade).notNull() } } @@ -103,7 +100,7 @@ let authorInfo: AuthorInfo? = try dbQueue.read { db in let books = try author.books.fetchAll(db) return AuthorInfo(author: author, books: books) } -if let authorInfo = authorInfo { +if let authorInfo { print("\(authorInfo.author.name) has written:") for book in authorInfo.books { print("- \(book.title)") @@ -126,7 +123,7 @@ let bookInfo: BookInfo? = try dbQueue.read { db in .including(required: Book.author) return try BookInfo.fetchOne(db, request) } -if let bookInfo = bookInfo { +if let bookInfo { print("\(bookInfo.book.title) was written by \(bookInfo.author.name)") } diff --git a/Documentation/Playgrounds/TransactionObserver.playground/Contents.swift b/Documentation/Playgrounds/TransactionObserver.playground/Contents.swift index fcfc802345..0db433c81e 100644 --- a/Documentation/Playgrounds/TransactionObserver.playground/Contents.swift +++ b/Documentation/Playgrounds/TransactionObserver.playground/Contents.swift @@ -23,7 +23,7 @@ migrator.registerMigration("createPet") { db in try db.create(table: "pet") { t in t.autoIncrementedPrimaryKey("id") t.column("name", .text).notNull() - t.column("ownerId", .integer).references("person", onDelete: .cascade) + t.belongsTo("owner", inTable: "person", onDelete: .cascade) } } try! migrator.migrate(dbQueue) diff --git a/Documentation/QueryInterfaceOrganization.md b/Documentation/QueryInterfaceOrganization.md index f1c27f7148..e78e652a0e 100644 --- a/Documentation/QueryInterfaceOrganization.md +++ b/Documentation/QueryInterfaceOrganization.md @@ -195,7 +195,7 @@ protocol DerivableRequest: AggregatingRequest, FilteredRequest, - `SelectionRequest` provides selection methods such as `select(selection)` or `annotated(with: selection)` - `TableRequest` provides table targeting methods such as `aliased(tableAlias)` -DerivableRequest makes it possible to build reusable code snippets that apply to both requests and associations. You'll read more about it in the [Good Practices for Designing Record Types](GoodPracticesForDesigningRecordTypes.md) and [Associations](AssociationsBasics.md). +DerivableRequest makes it possible to build reusable code snippets that apply to both requests and associations. You'll read more about it in the [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) and [Associations](AssociationsBasics.md). 
### FetchRequest diff --git a/Documentation/ReleaseProcess.md b/Documentation/ReleaseProcess.md index e8aa77fb79..008b24d66c 100644 --- a/Documentation/ReleaseProcess.md +++ b/Documentation/ReleaseProcess.md @@ -6,7 +6,7 @@ Release Process To release a new GRDB version: - Tests - - `make test` + - `make distclean test` - Build and run GRDBDemoiOS in Release configuration on a device - Archive GRDBDemoiOS - Check for performance regression with GRDBOSXPerformanceTests @@ -21,7 +21,6 @@ To release a new GRDB version: - Check tag authors: `git for-each-ref --format '%(refname) %(authorname)' refs/tags` - Push to the master & development branch - `pod trunk push --allow-warnings GRDB.swift.podspec` -- Update https://github.com/groue/WWDCCompanion - Update [performance comparison](https://github.com/groue/GRDB.swift/wiki/Performance): `make test_performance | Tests/parsePerformanceTests.rb | Tests/generatePerformanceReport.rb` diff --git a/Documentation/SQLInterpolation.md b/Documentation/SQLInterpolation.md index 884c19788e..2af59d2fb4 100644 --- a/Documentation/SQLInterpolation.md +++ b/Documentation/SQLInterpolation.md @@ -112,13 +112,15 @@ let components: [SQL] = [ let query = components.joined(separator: " ") ``` -Extract the plain SQL string from a literal: +To extract the plain SQL string from a literal, you need a `Database` connection such as the one provided by the `read` and `write` methods: ```swift -let query: SQL = "UPDATE player SET name = \(name) WHERE id = \(id)" -let (sql, arguments) = try dbQueue.read(query.build) -print(sql) // prints "UPDATE player SET name = ? WHERE id = ?" -print(arguments) // prints ["O'Brien", 42] +try dbQueue.read { db in + let query: SQL = "UPDATE player SET name = \(name) WHERE id = \(id)" + let (sql, arguments) = try query.build(db) + print(sql) // prints "UPDATE player SET name = ? WHERE id = ?" + print(arguments) // prints ["O'Brien", 42] +} ``` Build a literal from a plain SQL string: @@ -363,7 +365,7 @@ Let's extend Player with database methods. This chapter lists all kinds of supported interpolations. -- Types adopting the [TableRecord] protocol: +- Types adopting the [TableRecord] protocol and [Table](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/table) instances: ```swift struct Player: TableRecord { ... } @@ -371,9 +373,13 @@ This chapter lists all kinds of supported interpolations. // SELECT * FROM player "SELECT * FROM \(Player.self)" - // INSERT INTO player ... + // SELECT * FROM player let player: Player = ... - "INSERT INTO \(tableOf: player) ..." + "SELECT * FROM \(tableOf: player) ..." + + // SELECT * FROM player + let playerTable = Table("player") + "SELECT * FROM \(playerTable)" ``` - Columns selected by [TableRecord]: diff --git a/Documentation/WhyAdoptGRDB.md b/Documentation/WhyAdoptGRDB.md index 86fd94b1c1..8ce463636e 100644 --- a/Documentation/WhyAdoptGRDB.md +++ b/Documentation/WhyAdoptGRDB.md @@ -90,7 +90,7 @@ For your convenience, those record protocols can be derived from the [Decodable Being a protocol-oriented library that welcomes immutable types, GRDB records are unlike records in other ORM libraries. Particularly, records do not auto-update, and records are not uniqued. We'll see below that the lack of those features can be replaced with **database change notifications**, with many advantages. -See [Good Practices for Designing Record Types](GoodPracticesForDesigningRecordTypes.md) for some practical advice. 
+See [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) for some practical advice. ### Allow database records to cross threads @@ -190,7 +190,7 @@ Finally, raw FMDatabase, SQLite.swift, and Core Data are the hardest tools, and For detailed information about GRDB concurrency, check the [Concurrency Guide]. -For practical advice on designing the database access layer of your application, see the [Good Practices for Designing Record Types](GoodPracticesForDesigningRecordTypes.md). +For practical advice on designing the database access layer of your application, see the [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices). ### Never pay for using raw SQL diff --git a/GRDB.swift.podspec b/GRDB.swift.podspec index 452b046af9..5ae61b42b8 100644 --- a/GRDB.swift.podspec +++ b/GRDB.swift.podspec @@ -1,6 +1,6 @@ Pod::Spec.new do |s| s.name = 'GRDB.swift' - s.version = '6.6.0' + s.version = '6.27.0' s.license = { :type => 'MIT', :file => 'LICENSE' } s.summary = 'A toolkit for SQLite databases, with a focus on application development.' @@ -20,6 +20,9 @@ Pod::Spec.new do |s| ss.source_files = 'GRDB/**/*.swift', 'Support/grdb_config.h' ss.framework = 'Foundation' ss.library = 'sqlite3' + ss.xcconfig = { + 'OTHER_SWIFT_FLAGS' => '$(inherited) -D SQLITE_ENABLE_FTS5', + } end s.subspec 'SQLCipher' do |ss| diff --git a/GRDB.xcodeproj/project.pbxproj b/GRDB.xcodeproj/project.pbxproj index 089a9fd2a6..9c13f9eb07 100755 --- a/GRDB.xcodeproj/project.pbxproj +++ b/GRDB.xcodeproj/project.pbxproj @@ -13,6 +13,10 @@ 56012B9F257404DF00B4925B /* CommonTableExpression.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56012B742574048B00B4925B /* CommonTableExpression.swift */; }; 560233C42724234F00529DF3 /* SharedValueObservation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560233C32724234F00529DF3 /* SharedValueObservation.swift */; }; 560233C92724338800529DF3 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560233C82724338800529DF3 /* SharedValueObservationTests.swift */; }; + 5603CEBA2AC862EC00CF097D /* SQLJSONFunctions.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEB62AC862EC00CF097D /* SQLJSONFunctions.swift */; }; + 5603CEBB2AC862EC00CF097D /* SQLJSONExpressible.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEB72AC862EC00CF097D /* SQLJSONExpressible.swift */; }; + 5603CEBC2AC862EC00CF097D /* JSONColumn.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEB82AC862EC00CF097D /* JSONColumn.swift */; }; + 5603CED42AC8642F00CF097D /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEC92AC8631600CF097D /* JSONExpressionsTests.swift */; }; 560432A0228F00C2009D3FE2 /* OrderedDictionaryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56043299228F00C2009D3FE2 /* OrderedDictionaryTests.swift */; }; 560432A3228F1668009D3FE2 /* AssociationPrefetchingObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560432A2228F1667009D3FE2 /* AssociationPrefetchingObservationTests.swift */; }; 5604484925DEEEF7002BAA79 /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5604484825DEEEF7002BAA79 /* AssociationPrefetchingRelationTests.swift */; }; @@ -54,6 +58,9 @@ 561CFA7823735016000C8BAA /* TableRecordUpdateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFA7123735015000C8BAA 
/* TableRecordUpdateTests.swift */; }; 561CFA982376E546000C8BAA /* AssociationHasManyThroughOrderingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFA912376E546000C8BAA /* AssociationHasManyThroughOrderingTests.swift */; }; 561CFA9C2376EC86000C8BAA /* AssociationHasManyOrderingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFA9B2376EC86000C8BAA /* AssociationHasManyOrderingTests.swift */; }; + 561F38D82AC88A550051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38D72AC88A550051EEE9 /* JSONColumnTests.swift */; }; + 561F38EF2AC9CE130051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38EE2AC9CE130051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 561F38F42AC9CE510051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F32AC9CE510051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; 562205F11E420E47005860AC /* DatabasePoolReleaseMemoryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563363CF1C943D13000BE133 /* DatabasePoolReleaseMemoryTests.swift */; }; 562205F21E420E47005860AC /* DatabasePoolSchemaCacheTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 569531281C908A5B00CF1A2B /* DatabasePoolSchemaCacheTests.swift */; }; 562205F31E420E47005860AC /* DatabaseQueueReleaseMemoryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563363D41C94484E000BE133 /* DatabaseQueueReleaseMemoryTests.swift */; }; @@ -68,9 +75,12 @@ 562393601DEE06D300A6B01F /* CursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623935F1DEE06D300A6B01F /* CursorTests.swift */; }; 562393691DEE0CD200A6B01F /* FlattenCursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562393681DEE0CD200A6B01F /* FlattenCursorTests.swift */; }; 562393721DEE104400A6B01F /* MapCursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562393711DEE104400A6B01F /* MapCursorTests.swift */; }; + 5623B6142AED39A600436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6122AED39A600436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; + 5623B6152AED39A600436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6132AED39A600436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; 56256ED025D1ACD0008C2BDD /* Table.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56256ECF25D1ACD0008C2BDD /* Table.swift */; }; 56256ED925D1B316008C2BDD /* ForeignKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56256ED825D1B316008C2BDD /* ForeignKey.swift */; }; 562756431E963AAC0035B653 /* DatabaseWriterTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562756421E963AAC0035B653 /* DatabaseWriterTests.swift */; }; + 562B58CB2A29BBEF00E8C75D /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 562B58CA2A29BBEF00E8C75D /* Issue1383.sqlite */; }; 562EA8261F17B2AC00FA528C /* CompilationProtocolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562EA8251F17B2AC00FA528C /* CompilationProtocolTests.swift */; }; 562EA82F1F17B9EB00FA528C /* CompilationSubClassTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562EA82E1F17B9EB00FA528C /* CompilationSubClassTests.swift */; }; 56300B781C53F592005A543B /* QueryInterfaceRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56300B6F1C53F592005A543B /* QueryInterfaceRequest.swift */; }; @@ -93,6 +103,7 @@ 563B8FB524A1D029007A48C9 /* ReceiveValuesOn.swift in Sources */ = {isa 
= PBXBuildFile; fileRef = 563B8FB424A1D029007A48C9 /* ReceiveValuesOn.swift */; }; 563B8FC524A1D3B9007A48C9 /* OnDemandFuture.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563B8FC424A1D3B9007A48C9 /* OnDemandFuture.swift */; }; 563C67B324628BEA00E94EDC /* DatabasePoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563C67B224628BEA00E94EDC /* DatabasePoolTests.swift */; }; + 563CBBE12A595131008905CE /* SQLIndexGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563CBBE02A595131008905CE /* SQLIndexGenerator.swift */; }; 563DE4F3231A91E2005081B7 /* DatabaseConfigurationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563DE4EC231A91E2005081B7 /* DatabaseConfigurationTests.swift */; }; 563EF415215F87EB007DAACD /* OrderedDictionary.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563EF414215F87EB007DAACD /* OrderedDictionary.swift */; }; 563EF42D2161180D007DAACD /* AssociationAggregate.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563EF42C2161180D007DAACD /* AssociationAggregate.swift */; }; @@ -114,6 +125,7 @@ 56419C6D24A519A2004967E1 /* ValueObservationPublisherTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A6E24A51601004967E1 /* ValueObservationPublisherTests.swift */; }; 56419C7824A51A38004967E1 /* Recorder.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A5E24A51601004967E1 /* Recorder.swift */; }; 56419C7924A51A38004967E1 /* RecordingError.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A5F24A51601004967E1 /* RecordingError.swift */; }; + 5642A3182AD66DFE0065F717 /* LineDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5642A3172AD66DFE0065F717 /* LineDumpFormat.swift */; }; 564448831EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 564448821EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift */; }; 564CE43121AA901800652B19 /* ValueConcurrentObserver.swift in Sources */ = {isa = PBXBuildFile; fileRef = 564CE43021AA901800652B19 /* ValueConcurrentObserver.swift */; }; 564CE4E921B2E06F00652B19 /* ValueObservationMapTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 564CE4E821B2E06F00652B19 /* ValueObservationMapTests.swift */; }; @@ -145,6 +157,7 @@ 5653EB0C20944C7C00F46237 /* HasManyAssociation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5653EAFB20944C7B00F46237 /* HasManyAssociation.swift */; }; 5653EB2120944C7C00F46237 /* HasOneAssociation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5653EB0220944C7C00F46237 /* HasOneAssociation.swift */; }; 5653EC122098738B00F46237 /* SQLGenerationContext.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5653EC0B2098738B00F46237 /* SQLGenerationContext.swift */; }; + 565498612B8B815500585804 /* PrivacyInfo.xcprivacy in Resources */ = {isa = PBXBuildFile; fileRef = 648704AD2B7E66390036480B /* PrivacyInfo.xcprivacy */; }; 5656A7FF22946B34001FF3FF /* ValueObservationQueryInterfaceRequestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5656A7F822946B33001FF3FF /* ValueObservationQueryInterfaceRequestTests.swift */; }; 5656A81E2295B12F001FF3FF /* SQLAssociation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5656A81D2295B12F001FF3FF /* SQLAssociation.swift */; }; 5656A8AD2295BFD7001FF3FF /* TableRecord+Association.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5656A8A92295BFD5001FF3FF /* TableRecord+Association.swift */; }; @@ -193,14 +206,24 @@ 5676FBA622F5CAD9004717D9 /* ValueObservationRegionRecordingTests.swift in Sources */ = {isa = 
PBXBuildFile; fileRef = 5676FB9F22F5CAD9004717D9 /* ValueObservationRegionRecordingTests.swift */; }; 56781B0B243F86E600650A83 /* Refinable.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56781B0A243F86E600650A83 /* Refinable.swift */; }; 5679533327E0A2FB004D18BD /* ValueWriteOnlyObserver.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5679533227E0A2FB004D18BD /* ValueWriteOnlyObserver.swift */; }; + 567B5BE72AD3284100629622 /* DatabaseReader+dump.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE02AD3284100629622 /* DatabaseReader+dump.swift */; }; + 567B5BE82AD3284100629622 /* DumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE12AD3284100629622 /* DumpFormat.swift */; }; + 567B5BE92AD3284100629622 /* QuoteDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE32AD3284100629622 /* QuoteDumpFormat.swift */; }; + 567B5BEA2AD3284100629622 /* JSONDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE42AD3284100629622 /* JSONDumpFormat.swift */; }; + 567B5BEB2AD3284100629622 /* DebugDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE52AD3284100629622 /* DebugDumpFormat.swift */; }; + 567B5BEC2AD3284100629622 /* Database+Dump.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BE62AD3284100629622 /* Database+Dump.swift */; }; + 567B5C532AD32FF100629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BDC2AD3283500629622 /* DatabaseDumpTests.swift */; }; + 567B5C542AD32FF100629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BDB2AD3283500629622 /* DatabaseReaderDumpTests.swift */; }; 567DAF1C1EAB61ED00FC0928 /* grdb_config.h in Headers */ = {isa = PBXBuildFile; fileRef = 567DAF141EAB61ED00FC0928 /* grdb_config.h */; settings = {ATTRIBUTES = (Private, ); }; }; 567DAF351EAB789800FC0928 /* DatabaseLogErrorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567DAF341EAB789800FC0928 /* DatabaseLogErrorTests.swift */; }; 567F45A81F888B2600030B59 /* TruncateOptimizationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567F45A71F888B2600030B59 /* TruncateOptimizationTests.swift */; }; 568068311EBBA26100EFB8AA /* SQLRequestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */; }; 5682D721239582AA004B58C4 /* DatabaseSuspensionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5682D71A239582AA004B58C4 /* DatabaseSuspensionTests.swift */; }; 56848973242DE36F002F9702 /* ValueObservationScheduler.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56848972242DE36F002F9702 /* ValueObservationScheduler.swift */; }; + 5685C1932AD52EE600DA4B7A /* ListDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5685C1922AD52EE600DA4B7A /* ListDumpFormat.swift */; }; 56894F752606576600268F4D /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56894F742606576600268F4D /* FoundationDecimalTests.swift */; }; 56894FB72606589700268F4D /* Decimal.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56894F94260657D600268F4D /* Decimal.swift */; }; + 568C3F7A2A5AB2C300A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F792A5AB2C300A2309D /* ForeignKeyDefinitionTests.swift */; }; 568D131F2207213E00674B58 /* SQLQueryGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568D13182207213E00674B58 /* SQLQueryGenerator.swift */; }; 568EB71929211E0800E59445 /* DatabaseSnapshotPool.swift in Sources */ = 
{isa = PBXBuildFile; fileRef = 568EB71829211E0700E59445 /* DatabaseSnapshotPool.swift */; }; 568EB71E2921234800E59445 /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568EB71D2921234800E59445 /* DatabaseSnapshotPoolTests.swift */; }; @@ -250,7 +273,10 @@ 56AACAA822ACED7100A40F2A /* Fetch.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AACAA722ACED7100A40F2A /* Fetch.swift */; }; 56AE64122229A53700AD1B0B /* HasOneThroughAssociation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AE64112229A53700AD1B0B /* HasOneThroughAssociation.swift */; }; 56AE6424222AAC9500AD1B0B /* AssociationHasOneThroughSQLTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AE6423222AAC9500AD1B0B /* AssociationHasOneThroughSQLTests.swift */; }; + 56AFEF2F29969F6E00CA1E51 /* TransactionClock.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AFEF2E29969F6E00CA1E51 /* TransactionClock.swift */; }; + 56AFEF372996B9DC00CA1E51 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AFEF362996B9DC00CA1E51 /* TransactionDateTests.swift */; }; 56B021C91D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B021C81D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift */; }; + 56B6AB062BD3DCAC009A0B71 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6AB052BD3DCAC009A0B71 /* SingletonUserDefaultsTest.swift */; }; 56B6EF56208CB4E3002F0ACB /* ColumnExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6EF55208CB4E3002F0ACB /* ColumnExpressionTests.swift */; }; 56B7EE832863781300C0525F /* WALSnapshot.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B7EE822863781300C0525F /* WALSnapshot.swift */; }; 56B7F43A1BEB42D500E39BBF /* Migration.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B7F4391BEB42D500E39BBF /* Migration.swift */; }; @@ -279,6 +305,7 @@ 56D110D828AFC84000E64463 /* PersistableRecord+Insert.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110D728AFC84000E64463 /* PersistableRecord+Insert.swift */; }; 56D110DD28AFC8B400E64463 /* PersistableRecord+Save.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110DC28AFC8B400E64463 /* PersistableRecord+Save.swift */; }; 56D110FA28AFC97E00E64463 /* MutablePersistableRecord+DAO.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110F928AFC97E00E64463 /* MutablePersistableRecord+DAO.swift */; }; + 56D3332029C38D6700430680 /* WALSnapshotTransaction.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D3331F29C38D6700430680 /* WALSnapshotTransaction.swift */; }; 56D496541D812F5B008276D7 /* SQLExpressionLiteralTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56A4CDAF1D4234B200B1A9B9 /* SQLExpressionLiteralTests.swift */; }; 56D496551D812F83008276D7 /* FoundationDataTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5657AB2F1D108BA9006283EF /* FoundationDataTests.swift */; }; 56D496571D81303E008276D7 /* FoundationDateComponentsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5690C3251D23E6D800E59934 /* FoundationDateComponentsTests.swift */; }; @@ -375,6 +402,14 @@ 56F34FC224B0A0B7007513FC /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F34FC124B0A0B7007513FC /* SQLIdentifyingColumnsTests.swift */; }; 56F3E7491E66F83A00BF0F01 /* ResultCodeTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F3E7481E66F83A00BF0F01 /* ResultCodeTests.swift */; }; 
56F61DD5283D344E00AF9884 /* getThreadsCount.c in Sources */ = {isa = PBXBuildFile; fileRef = 56F61DD4283D344E00AF9884 /* getThreadsCount.c */; }; + 56F89DF72A57EAA9002FE2AA /* ColumnDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89DF62A57EAA9002FE2AA /* ColumnDefinition.swift */; }; + 56F89DFC2A57EAEA002FE2AA /* ForeignKeyDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89DFB2A57EAEA002FE2AA /* ForeignKeyDefinition.swift */; }; + 56F89DFE2A57EB19002FE2AA /* IndexDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89DFD2A57EB19002FE2AA /* IndexDefinition.swift */; }; + 56F89E002A57EB5C002FE2AA /* TableAlteration.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89DFF2A57EB5C002FE2AA /* TableAlteration.swift */; }; + 56F89E022A57EB87002FE2AA /* Database+SchemaDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E012A57EB87002FE2AA /* Database+SchemaDefinition.swift */; }; + 56F89E0C2A57EC16002FE2AA /* SQLTableGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E0B2A57EC16002FE2AA /* SQLTableGenerator.swift */; }; + 56F89E0E2A57EC2B002FE2AA /* SQLColumnGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E0D2A57EC2B002FE2AA /* SQLColumnGenerator.swift */; }; + 56F89E152A585C0B002FE2AA /* SQLTableAlterationGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E142A585C0B002FE2AA /* SQLTableAlterationGenerator.swift */; }; 56FA0C3028B1F2DC00B2DFF7 /* MutablePersistableRecord+Upsert.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FA0C2F28B1F2DC00B2DFF7 /* MutablePersistableRecord+Upsert.swift */; }; 56FA0C3928B20ABE00B2DFF7 /* PersistableRecord+Upsert.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FA0C3828B20ABE00B2DFF7 /* PersistableRecord+Upsert.swift */; }; 56FBFEDA2210731A00945324 /* SQLRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FBFED82210731A00945324 /* SQLRequest.swift */; }; @@ -404,6 +439,10 @@ 56012B742574048B00B4925B /* CommonTableExpression.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CommonTableExpression.swift; sourceTree = ""; }; 560233C32724234F00529DF3 /* SharedValueObservation.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SharedValueObservation.swift; sourceTree = ""; }; 560233C82724338800529DF3 /* SharedValueObservationTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SharedValueObservationTests.swift; sourceTree = ""; }; + 5603CEB62AC862EC00CF097D /* SQLJSONFunctions.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLJSONFunctions.swift; sourceTree = ""; }; + 5603CEB72AC862EC00CF097D /* SQLJSONExpressible.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLJSONExpressible.swift; sourceTree = ""; }; + 5603CEB82AC862EC00CF097D /* JSONColumn.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONColumn.swift; sourceTree = ""; }; + 5603CEC92AC8631600CF097D /* JSONExpressionsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONExpressionsTests.swift; sourceTree = ""; }; 56043299228F00C2009D3FE2 /* OrderedDictionaryTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OrderedDictionaryTests.swift; sourceTree = ""; }; 560432A2228F1667009D3FE2 /* 
AssociationPrefetchingObservationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingObservationTests.swift; sourceTree = ""; }; 5604484825DEEEF7002BAA79 /* AssociationPrefetchingRelationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingRelationTests.swift; sourceTree = ""; }; @@ -444,6 +483,9 @@ 561CFA7123735015000C8BAA /* TableRecordUpdateTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableRecordUpdateTests.swift; sourceTree = ""; }; 561CFA912376E546000C8BAA /* AssociationHasManyThroughOrderingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasManyThroughOrderingTests.swift; sourceTree = ""; }; 561CFA9B2376EC86000C8BAA /* AssociationHasManyOrderingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasManyOrderingTests.swift; sourceTree = ""; }; + 561F38D72AC88A550051EEE9 /* JSONColumnTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = JSONColumnTests.swift; sourceTree = ""; }; + 561F38EE2AC9CE130051EEE9 /* DatabaseDataEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataEncodingStrategyTests.swift; sourceTree = ""; }; + 561F38F32AC9CE510051EEE9 /* DatabaseDataDecodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataDecodingStrategyTests.swift; sourceTree = ""; }; 562393171DECC02000A6B01F /* RowFetchTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RowFetchTests.swift; sourceTree = ""; }; 5623932F1DEDFC5700A6B01F /* AnyCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AnyCursorTests.swift; sourceTree = ""; }; 5623934D1DEDFEFB00A6B01F /* EnumeratedCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = EnumeratedCursorTests.swift; sourceTree = ""; }; @@ -451,10 +493,13 @@ 5623935F1DEE06D300A6B01F /* CursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CursorTests.swift; sourceTree = ""; }; 562393681DEE0CD200A6B01F /* FlattenCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FlattenCursorTests.swift; sourceTree = ""; }; 562393711DEE104400A6B01F /* MapCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MapCursorTests.swift; sourceTree = ""; }; + 5623B6122AED39A600436239 /* DatabaseQueueInMemoryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueInMemoryCopyTests.swift; sourceTree = ""; }; + 5623B6132AED39A600436239 /* DatabaseQueueTemporaryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueTemporaryCopyTests.swift; sourceTree = ""; }; 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GRDBTestCase.swift; sourceTree = ""; }; 56256ECF25D1ACD0008C2BDD /* Table.swift */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.swift; path = Table.swift; sourceTree = ""; }; 56256ED825D1B316008C2BDD /* ForeignKey.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ForeignKey.swift; sourceTree = ""; }; 562756421E963AAC0035B653 /* DatabaseWriterTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseWriterTests.swift; sourceTree = ""; }; + 562B58CA2A29BBEF00E8C75D /* Issue1383.sqlite */ = {isa = PBXFileReference; lastKnownFileType = file; path = Issue1383.sqlite; sourceTree = ""; }; 562EA8251F17B2AC00FA528C /* CompilationProtocolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CompilationProtocolTests.swift; sourceTree = ""; }; 562EA82E1F17B9EB00FA528C /* CompilationSubClassTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CompilationSubClassTests.swift; sourceTree = ""; }; 56300B5D1C53C38F005A543B /* QueryInterfaceRequestTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = QueryInterfaceRequestTests.swift; sourceTree = ""; }; @@ -488,6 +533,7 @@ 563B8FB424A1D029007A48C9 /* ReceiveValuesOn.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ReceiveValuesOn.swift; sourceTree = ""; }; 563B8FC424A1D3B9007A48C9 /* OnDemandFuture.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OnDemandFuture.swift; sourceTree = ""; }; 563C67B224628BEA00E94EDC /* DatabasePoolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabasePoolTests.swift; sourceTree = ""; }; + 563CBBE02A595131008905CE /* SQLIndexGenerator.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLIndexGenerator.swift; sourceTree = ""; }; 563DE4EC231A91E2005081B7 /* DatabaseConfigurationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseConfigurationTests.swift; sourceTree = ""; }; 563EF414215F87EB007DAACD /* OrderedDictionary.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = OrderedDictionary.swift; sourceTree = ""; }; 563EF42C2161180D007DAACD /* AssociationAggregate.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = AssociationAggregate.swift; sourceTree = ""; }; @@ -509,7 +555,7 @@ 56419A6C24A51601004967E1 /* Support.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Support.swift; sourceTree = ""; }; 56419A6D24A51601004967E1 /* DatabaseRegionObservationPublisherTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DatabaseRegionObservationPublisherTests.swift; sourceTree = ""; }; 56419A6E24A51601004967E1 /* ValueObservationPublisherTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ValueObservationPublisherTests.swift; sourceTree = ""; }; - 5643676D272EDF2400C718C7 /* Scripts */ = {isa = PBXFileReference; lastKnownFileType = folder; path = Scripts; sourceTree = ""; }; + 5642A3172AD66DFE0065F717 /* LineDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = LineDumpFormat.swift; sourceTree = ""; }; 564448821EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.swift; path = DatabaseAfterNextTransactionCommitTests.swift; sourceTree = ""; }; 564A50C61BFF4B7F00B3A3A2 /* DatabaseCollationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseCollationTests.swift; sourceTree = ""; }; 564CE43021AA901800652B19 /* ValueConcurrentObserver.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ValueConcurrentObserver.swift; sourceTree = ""; }; @@ -608,15 +654,25 @@ 56781B0A243F86E600650A83 /* Refinable.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Refinable.swift; sourceTree = ""; }; 5679533227E0A2FB004D18BD /* ValueWriteOnlyObserver.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ValueWriteOnlyObserver.swift; sourceTree = ""; }; 567A80521D41350C00C7DCEC /* IndexInfoTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = IndexInfoTests.swift; sourceTree = ""; }; + 567B5BDB2AD3283500629622 /* DatabaseReaderDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseReaderDumpTests.swift; sourceTree = ""; }; + 567B5BDC2AD3283500629622 /* DatabaseDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDumpTests.swift; sourceTree = ""; }; + 567B5BE02AD3284100629622 /* DatabaseReader+dump.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "DatabaseReader+dump.swift"; sourceTree = ""; }; + 567B5BE12AD3284100629622 /* DumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DumpFormat.swift; sourceTree = ""; }; + 567B5BE32AD3284100629622 /* QuoteDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = QuoteDumpFormat.swift; sourceTree = ""; }; + 567B5BE42AD3284100629622 /* JSONDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONDumpFormat.swift; sourceTree = ""; }; + 567B5BE52AD3284100629622 /* DebugDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DebugDumpFormat.swift; sourceTree = ""; }; + 567B5BE62AD3284100629622 /* Database+Dump.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "Database+Dump.swift"; sourceTree = ""; }; 567DAF141EAB61ED00FC0928 /* grdb_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = grdb_config.h; sourceTree = ""; }; 567DAF341EAB789800FC0928 /* DatabaseLogErrorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseLogErrorTests.swift; sourceTree = ""; }; 567F45A71F888B2600030B59 /* TruncateOptimizationTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TruncateOptimizationTests.swift; sourceTree = ""; }; 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLRequestTests.swift; sourceTree = ""; }; 5682D71A239582AA004B58C4 /* DatabaseSuspensionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseSuspensionTests.swift; sourceTree = ""; }; 56848972242DE36F002F9702 /* 
ValueObservationScheduler.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ValueObservationScheduler.swift; sourceTree = ""; }; + 5685C1922AD52EE600DA4B7A /* ListDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListDumpFormat.swift; sourceTree = ""; }; 5687359E1CEDE16C009B9116 /* Betty.jpeg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = Betty.jpeg; sourceTree = ""; }; 56894F742606576600268F4D /* FoundationDecimalTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoundationDecimalTests.swift; sourceTree = ""; }; 56894F94260657D600268F4D /* Decimal.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Decimal.swift; sourceTree = ""; }; + 568C3F792A5AB2C300A2309D /* ForeignKeyDefinitionTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinitionTests.swift; sourceTree = ""; }; 568D13182207213E00674B58 /* SQLQueryGenerator.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLQueryGenerator.swift; sourceTree = ""; }; 568EB71829211E0700E59445 /* DatabaseSnapshotPool.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DatabaseSnapshotPool.swift; sourceTree = ""; }; 568EB71D2921234800E59445 /* DatabaseSnapshotPoolTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DatabaseSnapshotPoolTests.swift; sourceTree = ""; }; @@ -711,8 +767,11 @@ 56AE64112229A53700AD1B0B /* HasOneThroughAssociation.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = HasOneThroughAssociation.swift; sourceTree = ""; }; 56AE6423222AAC9500AD1B0B /* AssociationHasOneThroughSQLTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasOneThroughSQLTests.swift; sourceTree = ""; }; 56AF746A1D41FB9C005E9FF3 /* DatabaseValueConvertibleEscapingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseValueConvertibleEscapingTests.swift; sourceTree = ""; }; + 56AFEF2E29969F6E00CA1E51 /* TransactionClock.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TransactionClock.swift; sourceTree = ""; }; + 56AFEF362996B9DC00CA1E51 /* TransactionDateTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TransactionDateTests.swift; sourceTree = ""; }; 56B021C81D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MutablePersistableRecordPersistenceConflictPolicyTests.swift; sourceTree = ""; }; 56B14E7E1D4DAE54000BF4A3 /* RowFromDictionaryLiteralTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RowFromDictionaryLiteralTests.swift; sourceTree = ""; }; + 56B6AB052BD3DCAC009A0B71 /* SingletonUserDefaultsTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SingletonUserDefaultsTest.swift; sourceTree = ""; }; 56B6EF55208CB4E3002F0ACB /* ColumnExpressionTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ColumnExpressionTests.swift; sourceTree = ""; }; 56B7EE822863781300C0525F /* WALSnapshot.swift */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.swift; path = WALSnapshot.swift; sourceTree = ""; }; 56B7F4291BE14A1900E39BBF /* CGFloatTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CGFloatTests.swift; sourceTree = ""; }; @@ -749,6 +808,7 @@ 56D110D728AFC84000E64463 /* PersistableRecord+Insert.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "PersistableRecord+Insert.swift"; sourceTree = ""; }; 56D110DC28AFC8B400E64463 /* PersistableRecord+Save.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "PersistableRecord+Save.swift"; sourceTree = ""; }; 56D110F928AFC97E00E64463 /* MutablePersistableRecord+DAO.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+DAO.swift"; sourceTree = ""; }; + 56D3331F29C38D6700430680 /* WALSnapshotTransaction.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WALSnapshotTransaction.swift; sourceTree = ""; }; 56D5075D1F6BAE8600AE1C5B /* PrimaryKeyInfoTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PrimaryKeyInfoTests.swift; sourceTree = ""; }; 56D51CFF1EA789FA0074638A /* FetchableRecord+TableRecord.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "FetchableRecord+TableRecord.swift"; sourceTree = ""; }; 56D91AA12205E03700770D8D /* SQLRelation.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLRelation.swift; sourceTree = ""; }; @@ -778,6 +838,14 @@ 56F61DD0283D344D00AF9884 /* GRDBTests-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "GRDBTests-Bridging-Header.h"; path = "GRDBTests/GRDBTests-Bridging-Header.h"; sourceTree = ""; }; 56F61DD3283D344E00AF9884 /* getThreadsCount.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = getThreadsCount.h; sourceTree = ""; }; 56F61DD4283D344E00AF9884 /* getThreadsCount.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = getThreadsCount.c; sourceTree = ""; }; + 56F89DF62A57EAA9002FE2AA /* ColumnDefinition.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ColumnDefinition.swift; sourceTree = ""; }; + 56F89DFB2A57EAEA002FE2AA /* ForeignKeyDefinition.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinition.swift; sourceTree = ""; }; + 56F89DFD2A57EB19002FE2AA /* IndexDefinition.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = IndexDefinition.swift; sourceTree = ""; }; + 56F89DFF2A57EB5C002FE2AA /* TableAlteration.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TableAlteration.swift; sourceTree = ""; }; + 56F89E012A57EB87002FE2AA /* Database+SchemaDefinition.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = "Database+SchemaDefinition.swift"; sourceTree = ""; }; + 56F89E0B2A57EC16002FE2AA /* SQLTableGenerator.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLTableGenerator.swift; sourceTree = ""; }; + 56F89E0D2A57EC2B002FE2AA /* SQLColumnGenerator.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLColumnGenerator.swift; sourceTree = ""; }; + 56F89E142A585C0B002FE2AA /* SQLTableAlterationGenerator.swift */ = {isa = PBXFileReference; lastKnownFileType = 
sourcecode.swift; path = SQLTableAlterationGenerator.swift; sourceTree = ""; }; 56FA0C2F28B1F2DC00B2DFF7 /* MutablePersistableRecord+Upsert.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+Upsert.swift"; sourceTree = ""; }; 56FA0C3828B20ABE00B2DFF7 /* PersistableRecord+Upsert.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "PersistableRecord+Upsert.swift"; sourceTree = ""; }; 56FBFED82210731A00945324 /* SQLRequest.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLRequest.swift; sourceTree = ""; }; @@ -787,6 +855,7 @@ 56FF453F1D2C23BA00F21EF9 /* TableRecordDeleteTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableRecordDeleteTests.swift; sourceTree = ""; }; 56FF45551D2CDA5200F21EF9 /* RecordUniqueIndexTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordUniqueIndexTests.swift; sourceTree = ""; }; 6340BF7F1E5E3F7900832805 /* RecordPersistenceConflictPolicy.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordPersistenceConflictPolicy.swift; sourceTree = ""; }; + 648704AD2B7E66390036480B /* PrivacyInfo.xcprivacy */ = {isa = PBXFileReference; lastKnownFileType = text.xml; path = PrivacyInfo.xcprivacy; sourceTree = ""; }; C96C0F242084A442006B2981 /* SQLiteDateParser.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLiteDateParser.swift; sourceTree = ""; }; D263F40926C613090038B07F /* DatabaseColumnEncodingStrategyTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DatabaseColumnEncodingStrategyTests.swift; sourceTree = ""; }; DC2393C61ABE35F8003FF113 /* GRDB-Bridging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "GRDB-Bridging.h"; sourceTree = ""; }; @@ -815,6 +884,25 @@ /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ + 5603CEB52AC862EC00CF097D /* JSON */ = { + isa = PBXGroup; + children = ( + 5603CEB82AC862EC00CF097D /* JSONColumn.swift */, + 5603CEB72AC862EC00CF097D /* SQLJSONExpressible.swift */, + 5603CEB62AC862EC00CF097D /* SQLJSONFunctions.swift */, + ); + path = JSON; + sourceTree = ""; + }; + 5603CEC82AC8630300CF097D /* JSON */ = { + isa = PBXGroup; + children = ( + 561F38D72AC88A550051EEE9 /* JSONColumnTests.swift */, + 5603CEC92AC8631600CF097D /* JSONExpressionsTests.swift */, + ); + name = JSON; + sourceTree = ""; + }; 5605F1471C672E4000235C62 /* Support */ = { isa = PBXGroup; children = ( @@ -894,6 +982,7 @@ isa = PBXGroup; children = ( D263F40926C613090038B07F /* DatabaseColumnEncodingStrategyTests.swift */, + 561F38EE2AC9CE130051EEE9 /* DatabaseDataEncodingStrategyTests.swift */, 5665FA322129EEA0004D8612 /* DatabaseDateEncodingStrategyTests.swift */, 56703290212B544F007D270F /* DatabaseUUIDEncodingStrategyTests.swift */, 566A843F2041914000E50BFD /* MutablePersistableRecordChangesTests.swift */, @@ -921,12 +1010,14 @@ 56176C581EACC2D8000F3F2B /* GRDBTests */ = { isa = PBXGroup; children = ( + 56677C14241D14450050755D /* FailureTestCase.swift */, + 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */, 562EA81E1F17B26F00FA528C /* Compilation */, 56A238111B9C74A90082EB20 /* Core */, - 56677C14241D14450050755D /* FailureTestCase.swift */, + 567B5BDA2AD3281B00629622 /* Dump */, 5698AC3E1DA2BEBB0056AF8C /* FTS */, 
56176CA01EACEE2A000F3F2B /* GRDBCipher */, - 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */, + 5603CEC82AC8630300CF097D /* JSON */, 56A238231B9C74A90082EB20 /* Migrations */, 569978D31B539038005EBEED /* Private */, 56300B5C1C53C38F005A543B /* QueryInterface */, @@ -1002,6 +1093,7 @@ 56012B542573EED000B4925B /* CommonTableExpressionTests.swift */, 56EA63C4209C7CE3009715B8 /* DerivableRequestTests.swift */, 56300B601C53C42C005A543B /* FetchableRecord+QueryInterfaceRequestTests.swift */, + 568C3F792A5AB2C300A2309D /* ForeignKeyDefinitionTests.swift */, 56300B671C53D25E005A543B /* QueryInterfaceExpressionsTests.swift */, 5698AC021D9B9FCF0056AF8C /* QueryInterfaceExtensibilityTests.swift */, 563EF45221631E21007DAACD /* QueryInterfacePromiseTests.swift */, @@ -1041,8 +1133,10 @@ 5687359E1CEDE16C009B9116 /* Betty.jpeg */, 5672DE581CDB72520022BA81 /* DatabaseQueueBackupTests.swift */, 563363BC1C93FD5E000BE133 /* DatabaseQueueConcurrencyTests.swift */, + 5623B6122AED39A600436239 /* DatabaseQueueInMemoryCopyTests.swift */, 56A238141B9C74A90082EB20 /* DatabaseQueueInMemoryTests.swift */, 567156151CB142AA007DC145 /* DatabaseQueueReadOnlyTests.swift */, + 5623B6132AED39A600436239 /* DatabaseQueueTemporaryCopyTests.swift */, 569178451CED9B6000E179EA /* DatabaseQueueTests.swift */, ); name = DatabaseQueue; @@ -1051,6 +1145,7 @@ 563B06BF2185CD1700B38F35 /* ValueObservation */ = { isa = PBXGroup; children = ( + 562B58CA2A29BBEF00E8C75D /* Issue1383.sqlite */, 560233C82724338800529DF3 /* SharedValueObservationTests.swift */, 563B06F621861D8300B38F35 /* ValueObservationCountTests.swift */, 563B071721862F4C00B38F35 /* ValueObservationDatabaseValueConvertibleTests.swift */, @@ -1107,13 +1202,6 @@ path = GRDBCombineTests; sourceTree = ""; }; - 564390D42414FC2C00BA61E6 /* Frameworks */ = { - isa = PBXGroup; - children = ( - ); - name = Frameworks; - sourceTree = ""; - }; 5653EABF20944B1300F46237 /* Association */ = { isa = PBXGroup; children = ( @@ -1171,6 +1259,11 @@ 5656A8142295AF75001FF3FF /* Schema */ = { isa = PBXGroup; children = ( + 56F89DF62A57EAA9002FE2AA /* ColumnDefinition.swift */, + 56F89E012A57EB87002FE2AA /* Database+SchemaDefinition.swift */, + 56F89DFB2A57EAEA002FE2AA /* ForeignKeyDefinition.swift */, + 56F89DFD2A57EB19002FE2AA /* IndexDefinition.swift */, + 56F89DFF2A57EB5C002FE2AA /* TableAlteration.swift */, 566AD8B11D5318F4002EC1A8 /* TableDefinition.swift */, 5698AC771DA37DCB0056AF8C /* VirtualTableModule.swift */, ); @@ -1180,8 +1273,12 @@ 5656A8162295AFD6001FF3FF /* SQLGeneration */ = { isa = PBXGroup; children = ( + 56F89E0D2A57EC2B002FE2AA /* SQLColumnGenerator.swift */, 5653EC0B2098738B00F46237 /* SQLGenerationContext.swift */, + 563CBBE02A595131008905CE /* SQLIndexGenerator.swift */, 568D13182207213E00674B58 /* SQLQueryGenerator.swift */, + 56F89E142A585C0B002FE2AA /* SQLTableAlterationGenerator.swift */, + 56F89E0B2A57EC16002FE2AA /* SQLTableGenerator.swift */, ); path = SQLGeneration; sourceTree = ""; @@ -1247,6 +1344,7 @@ 5674A7251F30A8EF0095F066 /* FetchableRecord */ = { isa = PBXGroup; children = ( + 561F38F32AC9CE510051EEE9 /* DatabaseDataDecodingStrategyTests.swift */, 5665FA132129C9D6004D8612 /* DatabaseDateDecodingStrategyTests.swift */, 5674A7261F30A9090095F066 /* FetchableRecordDecodableTests.swift */, 565B0FEE1BBC7D980098DE03 /* FetchableRecordTests.swift */, @@ -1254,6 +1352,38 @@ name = FetchableRecord; sourceTree = ""; }; + 567B5BDA2AD3281B00629622 /* Dump */ = { + isa = PBXGroup; + children = ( + 567B5BDC2AD3283500629622 /* DatabaseDumpTests.swift */, + 
567B5BDB2AD3283500629622 /* DatabaseReaderDumpTests.swift */, + ); + name = Dump; + sourceTree = ""; + }; + 567B5BDF2AD3284100629622 /* Dump */ = { + isa = PBXGroup; + children = ( + 567B5BE62AD3284100629622 /* Database+Dump.swift */, + 567B5BE02AD3284100629622 /* DatabaseReader+dump.swift */, + 567B5BE12AD3284100629622 /* DumpFormat.swift */, + 567B5BE22AD3284100629622 /* DumpFormats */, + ); + path = Dump; + sourceTree = ""; + }; + 567B5BE22AD3284100629622 /* DumpFormats */ = { + isa = PBXGroup; + children = ( + 567B5BE32AD3284100629622 /* QuoteDumpFormat.swift */, + 567B5BE42AD3284100629622 /* JSONDumpFormat.swift */, + 5642A3172AD66DFE0065F717 /* LineDumpFormat.swift */, + 5685C1922AD52EE600DA4B7A /* ListDumpFormat.swift */, + 567B5BE52AD3284100629622 /* DebugDumpFormat.swift */, + ); + path = DumpFormats; + sourceTree = ""; + }; 569530FA1C9067CC00CF1A2B /* Crash */ = { isa = PBXGroup; children = ( @@ -1368,6 +1498,7 @@ 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */, 56A238201B9C74A90082EB20 /* Statement */, 56E8CE0F1BB4FE5B00828BEC /* StatementColumnConvertibleFetchTests.swift */, + 56AFEF362996B9DC00CA1E51 /* TransactionDateTests.swift */, 5607EFD11BB8253300605DE3 /* TransactionObserver */, ); name = Core; @@ -1423,6 +1554,7 @@ children = ( 564E73DE203D50B9000C443C /* JoinSupportTests.swift */, 5616B4FA28B5F5220052017E /* SingletonRecordTest.swift */, + 56B6AB052BD3DCAC009A0B71 /* SingletonUserDefaultsTest.swift */, 5674A7251F30A8EF0095F066 /* FetchableRecord */, 560B3FA41C19DFF800C58EC7 /* PersistableRecord */, 56176C9E1EACEDF9000F3F2B /* Record */, @@ -1461,12 +1593,14 @@ 566B9C1F25C6CC24004542CF /* RowDecodingError.swift */, 56BB6EA81D3009B100A1CA52 /* SchedulingWatchdog.swift */, 560A37A61C8FF6E500949E71 /* SerializedDatabase.swift */, + 56D3331F29C38D6700430680 /* WALSnapshotTransaction.swift */, 56E9FAD7221053DC00C703A8 /* SQL.swift */, 569D6DDD220EF9E100A058A9 /* SQLInterpolation.swift */, 56FBFED82210731A00945324 /* SQLRequest.swift */, 56A238781B9C75030082EB20 /* Statement.swift */, 566B912A1FA4D0CC0012D5B0 /* StatementAuthorizer.swift */, 560D923F1C672C3E00F4F92B /* StatementColumnConvertible.swift */, + 56AFEF2E29969F6E00CA1E51 /* TransactionClock.swift */, 566B91321FA4D3810012D5B0 /* TransactionObserver.swift */, 56B7EE822863781300C0525F /* WALSnapshot.swift */, 5605F1471C672E4000235C62 /* Support */, @@ -1579,8 +1713,6 @@ DC37742D19C8CC90004FCF85 /* GRDB */, DC10500F19C904DD00D8CA30 /* Tests */, DC3773F419C8CBB3004FCF85 /* Products */, - 564390D42414FC2C00BA61E6 /* Frameworks */, - 5643676D272EDF2400C718C7 /* Scripts */, ); indentWidth = 4; sourceTree = ""; @@ -1610,8 +1742,11 @@ children = ( 56A2FA3524424D2A00E97D23 /* Export.swift */, 566DDE0C288D763C0000DCFB /* Fixits.swift */, + 648704AD2B7E66390036480B /* PrivacyInfo.xcprivacy */, 56A2386F1B9C75030082EB20 /* Core */, + 567B5BDF2AD3284100629622 /* Dump */, 5698AC291D9E5A480056AF8C /* FTS */, + 5603CEB52AC862EC00CF097D /* JSON */, 56A238911B9C750B0082EB20 /* Migration */, 56300B6D1C53F592005A543B /* QueryInterface */, 56A2389F1B9C753B0082EB20 /* Record */, @@ -1734,6 +1869,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 562B58CB2A29BBEF00E8C75D /* Issue1383.sqlite in Resources */, 569BBA4622906A8200478429 /* InflectionsTests.json in Resources */, 56D496C21D81374C008276D7 /* Betty.jpeg in Resources */, ); @@ -1743,6 +1879,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 565498612B8B815500585804 /* PrivacyInfo.xcprivacy in Resources */, ); 
runOnlyForDeploymentPostprocessing = 0; }; @@ -1795,6 +1932,7 @@ 56DA7CF7260FA9D400A8D97B /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */, 56D496691D813086008276D7 /* QueryInterfaceExpressionsTests.swift in Sources */, 562393691DEE0CD200A6B01F /* FlattenCursorTests.swift in Sources */, + 567B5C532AD32FF100629622 /* DatabaseDumpTests.swift in Sources */, 562205F21E420E47005860AC /* DatabasePoolSchemaCacheTests.swift in Sources */, 56D496B01D813385008276D7 /* DatabaseErrorTests.swift in Sources */, 56176C5C1EACCCC7000F3F2B /* FTS5TableBuilderTests.swift in Sources */, @@ -1806,6 +1944,7 @@ 56CC9243201E034D00CB597E /* PrefixWhileCursorTests.swift in Sources */, 560714E3227DD0810091BB10 /* AssociationPrefetchingSQLTests.swift in Sources */, 56D496841D813147008276D7 /* SelectStatementTests.swift in Sources */, + 561F38D82AC88A550051EEE9 /* JSONColumnTests.swift in Sources */, 56D496B11D8133BC008276D7 /* DatabaseQueueReadOnlyTests.swift in Sources */, 56D4968C1D81316E008276D7 /* RawRepresentable+DatabaseValueConvertibleTests.swift in Sources */, 56419C6D24A519A2004967E1 /* ValueObservationPublisherTests.swift in Sources */, @@ -1820,6 +1959,7 @@ 56D496BF1D8135D4008276D7 /* TableDefinitionTests.swift in Sources */, 5674A7171F3087710095F066 /* DatabaseValueConvertibleDecodableTests.swift in Sources */, 56D496801D813131008276D7 /* StatementColumnConvertibleFetchTests.swift in Sources */, + 5603CED42AC8642F00CF097D /* JSONExpressionsTests.swift in Sources */, 563B0705218627F800B38F35 /* ValueObservationRowTests.swift in Sources */, 56D4966E1D81309E008276D7 /* RecordPrimaryKeyMultipleTests.swift in Sources */, 56D496891D81316E008276D7 /* DatabaseValueConvertibleFetchTests.swift in Sources */, @@ -1879,6 +2019,7 @@ 56419C6B24A519A2004967E1 /* Support.swift in Sources */, 56D496871D81316E008276D7 /* DatabaseTimestampTests.swift in Sources */, 5615B26A222AFE8F00061C1C /* AssociationHasOneThroughRowScopeTests.swift in Sources */, + 5623B6152AED39A600436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 561CFA9C2376EC86000C8BAA /* AssociationHasManyOrderingTests.swift in Sources */, 56176C5A1EACCCC7000F3F2B /* FTS5PatternTests.swift in Sources */, 56D496581D81304E008276D7 /* FoundationDateTests.swift in Sources */, @@ -1897,6 +2038,7 @@ 56D496B81D813465008276D7 /* DataMemoryTests.swift in Sources */, 563B06CA2185D2E500B38F35 /* ValueObservationFetchTests.swift in Sources */, 56D496541D812F5B008276D7 /* SQLExpressionLiteralTests.swift in Sources */, + 561F38F42AC9CE510051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */, 56D496961D81317B008276D7 /* PersistableRecordTests.swift in Sources */, 5616B4FB28B5F5220052017E /* SingletonRecordTest.swift in Sources */, 56419C5724A51998004967E1 /* Inverted.swift in Sources */, @@ -1907,6 +2049,7 @@ 56057C552291B16A00A7CB10 /* AssociationHasManyRowScopeTests.swift in Sources */, 56FEB8F8248403000081AF83 /* DatabaseTraceTests.swift in Sources */, 56419C5124A51998004967E1 /* Finished.swift in Sources */, + 561F38EF2AC9CE130051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, 56176C5E1EACCCC7000F3F2B /* FTS5WrapperTokenizerTests.swift in Sources */, 564D4F7E261C6DC200F55856 /* CaseInsensitiveIdentifierTests.swift in Sources */, 56FEE7FB1F47253700D930EA /* TableRecordTests.swift in Sources */, @@ -1923,6 +2066,7 @@ 562393601DEE06D300A6B01F /* CursorTests.swift in Sources */, 5653EAE420944B4F00F46237 /* AssociationParallelRowScopesTests.swift in Sources */, 56D4968F1D81316E008276D7 /* RowFromDictionaryTests.swift in 
Sources */, + 567B5C542AD32FF100629622 /* DatabaseReaderDumpTests.swift in Sources */, 56915782231BF28B00E1D237 /* PoolTests.swift in Sources */, 56419C6C24A519A2004967E1 /* DatabaseRegionObservationPublisherTests.swift in Sources */, 56D4968E1D81316E008276D7 /* RowCopiedFromStatementTests.swift in Sources */, @@ -1945,11 +2089,14 @@ 562393181DECC02000A6B01F /* RowFetchTests.swift in Sources */, 56677C0D241CD0D00050755D /* ValueObservationRecorder.swift in Sources */, 5653EADA20944B4F00F46237 /* AssociationRowScopeSearchTests.swift in Sources */, + 56B6AB062BD3DCAC009A0B71 /* SingletonUserDefaultsTest.swift in Sources */, 563B5336267E2F90009549B5 /* TableTests.swift in Sources */, 56D4965A1D81304E008276D7 /* FoundationNSDataTests.swift in Sources */, 56D496791D81309E008276D7 /* RecordWithColumnNameManglingTests.swift in Sources */, 56D4966C1D81309E008276D7 /* RecordMinimalPrimaryKeyRowIDTests.swift in Sources */, 564CE5BE21B8FFA300652B19 /* DatabaseRegionObservationTests.swift in Sources */, + 5623B6142AED39A600436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */, + 56AFEF372996B9DC00CA1E51 /* TransactionDateTests.swift in Sources */, 564F9C1E1F069B4E00877A00 /* DatabaseAggregateTests.swift in Sources */, D263F40A26C613090038B07F /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 5653EAEE20944B4F00F46237 /* AssociationParallelDecodableRecordTests.swift in Sources */, @@ -1959,6 +2106,7 @@ 56D496C11D81373A008276D7 /* DatabaseQueueBackupTests.swift in Sources */, 562393721DEE104400A6B01F /* MapCursorTests.swift in Sources */, 56D496571D81303E008276D7 /* FoundationDateComponentsTests.swift in Sources */, + 568C3F7A2A5AB2C300A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 56D496701D81309E008276D7 /* RecordPrimaryKeyRowIDTests.swift in Sources */, 5622060C1E420EB3005860AC /* DatabaseQueueConcurrencyTests.swift in Sources */, 56E5D8041B4D424400430942 /* GRDBTestCase.swift in Sources */, @@ -2014,6 +2162,7 @@ files = ( 568ECA9F25D7E53E00B71526 /* SQLOrdering.swift in Sources */, 56713FDD2691F409006153C3 /* JSONRequiredEncoder.swift in Sources */, + 5685C1932AD52EE600DA4B7A /* ListDumpFormat.swift in Sources */, 56158780288D875E00A67323 /* Optional.swift in Sources */, 563EF42D2161180D007DAACD /* AssociationAggregate.swift in Sources */, 5616AAF1207CD45E00AC3664 /* RequestProtocols.swift in Sources */, @@ -2047,6 +2196,7 @@ 563082E42430B6BE00C14A05 /* DatabaseCancellable.swift in Sources */, 564CE59D21B7A8B500652B19 /* RemoveDuplicates.swift in Sources */, 5698AD351DABAF4A0056AF8C /* FTS5CustomTokenizer.swift in Sources */, + 56F89DFE2A57EB19002FE2AA /* IndexDefinition.swift in Sources */, 566B91131FA4C3F50012D5B0 /* DatabaseCollation.swift in Sources */, 5605F1591C672E4000235C62 /* CGFloat.swift in Sources */, 5674A7031F307FCD0095F066 /* DatabaseValueConvertible+ReferenceConvertible.swift in Sources */, @@ -2061,11 +2211,13 @@ 563B8FB524A1D029007A48C9 /* ReceiveValuesOn.swift in Sources */, 564F9C2D1F075DD200877A00 /* DatabaseFunction.swift in Sources */, 564CE5AC21B8FAB400652B19 /* DatabaseRegionObservation.swift in Sources */, + 567B5BE72AD3284100629622 /* DatabaseReader+dump.swift in Sources */, 5659F4981EA8D989004A4992 /* Pool.swift in Sources */, 56848973242DE36F002F9702 /* ValueObservationScheduler.swift in Sources */, 5674A6F41F307F600095F066 /* EncodableRecord+Encodable.swift in Sources */, 5664759A1D97D8A000FF74B8 /* SQLCollection.swift in Sources */, 56D110DD28AFC8B400E64463 /* PersistableRecord+Save.swift in Sources */, + 56F89E022A57EB87002FE2AA /* 
Database+SchemaDefinition.swift in Sources */, 567404881CEF84C8003ED5CC /* RowAdapter.swift in Sources */, 5653EB0320944C7C00F46237 /* BelongsToAssociation.swift in Sources */, 563363C41C942C37000BE133 /* DatabaseWriter.swift in Sources */, @@ -2079,6 +2231,8 @@ 4E13D2F32769B87F0037588C /* DatabaseBackupProgress.swift in Sources */, 560A37A71C8FF6E500949E71 /* SerializedDatabase.swift in Sources */, 563B8FAC24A1CE43007A48C9 /* DatabasePublishers.swift in Sources */, + 56F89E0C2A57EC16002FE2AA /* SQLTableGenerator.swift in Sources */, + 56F89DFC2A57EAEA002FE2AA /* ForeignKeyDefinition.swift in Sources */, 56FA0C3028B1F2DC00B2DFF7 /* MutablePersistableRecord+Upsert.swift in Sources */, 5605F1691C672E4000235C62 /* NSString.swift in Sources */, 560D92401C672C3E00F4F92B /* DatabaseValueConvertible.swift in Sources */, @@ -2086,6 +2240,7 @@ 56CEB5011EAA2F4D00BFAF62 /* FTS4.swift in Sources */, 56D110C928AFC68B00E64463 /* MutablePersistableRecord+Save.swift in Sources */, 56AE64122229A53700AD1B0B /* HasOneThroughAssociation.swift in Sources */, + 567B5BEA2AD3284100629622 /* JSONDumpFormat.swift in Sources */, 56A238811B9C75030082EB20 /* DatabaseError.swift in Sources */, 56256ED925D1B316008C2BDD /* ForeignKey.swift in Sources */, 56D51D001EA789FA0074638A /* FetchableRecord+TableRecord.swift in Sources */, @@ -2094,10 +2249,13 @@ 56A238851B9C75030082EB20 /* DatabaseValue.swift in Sources */, 5671FC201DA3CAC9003BF4FF /* FTS3TokenizerDescriptor.swift in Sources */, 56A238A41B9C753B0082EB20 /* Record.swift in Sources */, + 567B5BE92AD3284100629622 /* QuoteDumpFormat.swift in Sources */, 56CEB5451EAA359A00BFAF62 /* Column.swift in Sources */, 5657AB0F1D10899D006283EF /* URL.swift in Sources */, 560D924B1C672C4B00F4F92B /* TableRecord.swift in Sources */, 56DAA2DB1DE9C827006E10C8 /* Cursor.swift in Sources */, + 56F89E152A585C0B002FE2AA /* SQLTableAlterationGenerator.swift in Sources */, + 56AFEF2F29969F6E00CA1E51 /* TransactionClock.swift in Sources */, 5674A6EB1F307F0E0095F066 /* DatabaseValueConvertible+Encodable.swift in Sources */, 56D91AA92205F2F100770D8D /* DatabasePromise.swift in Sources */, 56959629222C462D002CB7C9 /* HasManyThroughAssociation.swift in Sources */, @@ -2110,8 +2268,11 @@ 56D110C428AFC5A800E64463 /* MutablePersistableRecord+Update.swift in Sources */, 5690C3401D23E82A00E59934 /* Data.swift in Sources */, 5659F4881EA8D94E004A4992 /* Utils.swift in Sources */, + 567B5BE82AD3284100629622 /* DumpFormat.swift in Sources */, 566BE71E2342542F00A8254B /* LockedBox.swift in Sources */, 56A238931B9C750B0082EB20 /* DatabaseMigrator.swift in Sources */, + 5603CEBB2AC862EC00CF097D /* SQLJSONExpressible.swift in Sources */, + 56F89DF72A57EAA9002FE2AA /* ColumnDefinition.swift in Sources */, 5611620825757583007AAF99 /* JoinAssociation.swift in Sources */, 5695311F1C907A8C00CF1A2B /* DatabaseSchemaCache.swift in Sources */, 560233C42724234F00529DF3 /* SharedValueObservation.swift in Sources */, @@ -2119,8 +2280,10 @@ 568D131F2207213E00674B58 /* SQLQueryGenerator.swift in Sources */, 5605F15D1C672E4000235C62 /* DatabaseDateComponents.swift in Sources */, 56894FB72606589700268F4D /* Decimal.swift in Sources */, + 56F89E002A57EB5C002FE2AA /* TableAlteration.swift in Sources */, 56B964B91DA51D0A0002DA19 /* FTS5Pattern.swift in Sources */, 56FBFEDA2210731A00945324 /* SQLRequest.swift in Sources */, + 56F89E0E2A57EC2B002FE2AA /* SQLColumnGenerator.swift in Sources */, 563363C01C942C04000BE133 /* DatabaseReader.swift in Sources */, 564CE43121AA901800652B19 /* ValueConcurrentObserver.swift in Sources 
*/, 5605F1651C672E4000235C62 /* NSNull.swift in Sources */, @@ -2137,10 +2300,14 @@ 56D110BF28AFC51000E64463 /* MutablePersistableRecord+Insert.swift in Sources */, 566B9C2025C6CC24004542CF /* RowDecodingError.swift in Sources */, 5698AD211DABAEFA0056AF8C /* FTS5WrapperTokenizer.swift in Sources */, + 5603CEBC2AC862EC00CF097D /* JSONColumn.swift in Sources */, 56A238831B9C75030082EB20 /* DatabaseQueue.swift in Sources */, + 5642A3182AD66DFE0065F717 /* LineDumpFormat.swift in Sources */, 5605F1671C672E4000235C62 /* NSNumber.swift in Sources */, 56E9FADA221053DD00C703A8 /* SQL.swift in Sources */, + 5603CEBA2AC862EC00CF097D /* SQLJSONFunctions.swift in Sources */, 56717271261C68E900423B6F /* CaseInsensitiveIdentifier.swift in Sources */, + 563CBBE12A595131008905CE /* SQLIndexGenerator.swift in Sources */, C96C0F2B2084A442006B2981 /* SQLiteDateParser.swift in Sources */, 56781B0B243F86E600650A83 /* Refinable.swift in Sources */, 56A238871B9C75030082EB20 /* Row.swift in Sources */, @@ -2149,10 +2316,13 @@ 5653EC122098738B00F46237 /* SQLGenerationContext.swift in Sources */, 560D92471C672C4B00F4F92B /* MutablePersistableRecord.swift in Sources */, 5674A6E41F307F0E0095F066 /* DatabaseValueConvertible+Decodable.swift in Sources */, + 56D3332029C38D6700430680 /* WALSnapshotTransaction.swift in Sources */, 5653EB0C20944C7C00F46237 /* HasManyAssociation.swift in Sources */, 5657AAB91D107001006283EF /* NSData.swift in Sources */, 560D92421C672C3E00F4F92B /* StatementColumnConvertible.swift in Sources */, + 567B5BEC2AD3284100629622 /* Database+Dump.swift in Sources */, 56CEB5531EAA359A00BFAF62 /* SQLExpression.swift in Sources */, + 567B5BEB2AD3284100629622 /* DebugDumpFormat.swift in Sources */, 56B9649D1DA51B4C0002DA19 /* FTS5.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; diff --git a/GRDB.xcworkspace/contents.xcworkspacedata b/GRDB.xcworkspace/contents.xcworkspacedata index 1fec2cce91..3258ed7a7b 100644 --- a/GRDB.xcworkspace/contents.xcworkspacedata +++ b/GRDB.xcworkspace/contents.xcworkspacedata @@ -17,6 +17,9 @@ location = "group:MyPlayground.playground"> + + diff --git a/GRDB/Core/Configuration.swift b/GRDB/Core/Configuration.swift index 2316c2cb14..6b261902b6 100644 --- a/GRDB/Core/Configuration.swift +++ b/GRDB/Core/Configuration.swift @@ -169,6 +169,21 @@ public struct Configuration { /// ``` public var publicStatementArguments = false + /// The clock that feeds ``Database/transactionDate``. + /// + /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) + /// + /// The default clock is ``DefaultTransactionClock`` (which returns the + /// start date of the current transaction). + /// + /// For example: + /// + /// ```swift + /// var config = Configuration() + /// config.transactionClock = .custom { db in /* return some Date */ } + /// ``` + public var transactionClock: any TransactionClock = .default + // MARK: - Managing SQLite Connections private var setups: [(Database) throws -> Void] = [] @@ -273,6 +288,46 @@ public struct Configuration { /// of a read access. public var allowsUnsafeTransactions = false + // MARK: - Journal Mode + + /// Defines how the journal mode is configured when the database + /// connection is opened. 
+ /// + /// Related SQLite documentation: + public enum JournalModeConfiguration: Sendable { + /// The default setup has ``DatabaseQueue`` perform no specific + /// configuration of the journal mode, and ``DatabasePool`` + /// configure the database for the WAL mode (just like the + /// ``wal`` case). + case `default` + + /// The journal mode is set to WAL (plus extra configurations that + /// make life easier with WAL databases). + case wal + } + + /// Defines how the journal mode is configured when the database + /// connection is opened. + /// + /// This configuration is ignored when ``readonly`` is true. + /// + /// The default value has ``DatabaseQueue`` perform no specific + /// configuration of the journal mode, and ``DatabasePool`` configure + /// the database for the WAL mode. + /// + /// Applications that need to open a WAL database with a + /// ``DatabaseQueue`` should set the `journalMode` to `wal`: + /// + /// ```swift + /// // Open a WAL database with DatabaseQueue + /// var config = Configuration() + /// config.journalMode = .wal + /// let dbQueue = try DatabaseQueue(path: "...", configuration: config) + /// ``` + /// + /// Related SQLite documentation: + public var journalMode = JournalModeConfiguration.default + // MARK: - Concurrency /// Defines how `SQLITE_BUSY` errors are handled. @@ -286,13 +341,15 @@ public struct Configuration { /// If nil, GRDB picks a default one. var readonlyBusyMode: Database.BusyMode? = nil - /// The maximum number of concurrent readers. + /// The maximum number of concurrent reader connections. /// - /// This configuration applies to ``DatabasePool`` only. The default value - /// is 5. + /// This configuration has effect on ``DatabasePool`` and + /// ``DatabaseSnapshotPool`` only. The default value is 5. /// /// You can query this value at runtime in order to get the actual capacity - /// for concurrent reads of any ``DatabaseReader``. For example: + /// for concurrent reads of any ``DatabaseReader``. In this context, + /// ``DatabaseQueue`` and ``DatabaseSnapshot`` have a capacity of 1, + /// because they can't perform two concurrent reads. For example: /// /// ```swift /// var config = Configuration() @@ -306,6 +363,7 @@ public struct Configuration { /// print(dbQueue.configuration.maximumReaderCount) // 1 /// print(dbPool.configuration.maximumReaderCount) // 5 /// print(dbSnapshot.configuration.maximumReaderCount) // 1 + /// ``` public var maximumReaderCount: Int = 5 /// The quality of service of database accesses. @@ -357,7 +415,22 @@ public struct Configuration { /// The default is true. public var automaticMemoryManagement = true #endif - + + /// A boolean value indicating whether read-only connections should be + /// kept open. + /// + /// This configuration flag applies to ``DatabasePool`` only. The + /// default value is false. + /// + /// When the flag is false, a `DatabasePool` closes read-only + /// connections when requested to dispose non-essential memory with + /// ``DatabasePool/releaseMemory()``. When true, those connections are + /// kept open. + /// + /// Consider setting this flag to true when profiling your application + /// reveals that a lot of time is spent opening new SQLite connections. + public var persistentReadOnlyConnections = false + // MARK: - Factory Configuration /// Creates a factory configuration. @@ -406,7 +479,7 @@ public struct Configuration { /// Creates a DispatchQueue which has the quality of service and target /// queue of read accesses.
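The new `Configuration` surface above composes naturally. A minimal usage sketch, assuming a hypothetical on-disk path; `transactionClock` is experimental per its doc comment, and `persistentReadOnlyConnections` only matters for `DatabasePool`:

```swift
import Foundation
import GRDB

var config = Configuration()
// Open a WAL database even with DatabaseQueue (new journalMode knob).
config.journalMode = .wal
// Experimental: the clock that feeds Database.transactionDate.
config.transactionClock = .custom { _ in Date() }
// DatabasePool only: keep reader connections open across releaseMemory().
config.persistentReadOnlyConnections = true
// DatabasePool/DatabaseSnapshotPool only: reader capacity (default 5).
config.maximumReaderCount = 4

// "/path/to/db.sqlite" is a placeholder path.
let dbQueue = try DatabaseQueue(path: "/path/to/db.sqlite", configuration: config)
```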
func makeReaderDispatchQueue(label: String) -> DispatchQueue { - if let targetQueue = targetQueue { + if let targetQueue { return DispatchQueue(label: label, target: targetQueue) } else { return DispatchQueue(label: label, qos: qos) diff --git a/GRDB/Core/Cursor.swift b/GRDB/Core/Cursor.swift index 58834bb859..b8ee021552 100644 --- a/GRDB/Core/Cursor.swift +++ b/GRDB/Core/Cursor.swift @@ -733,6 +733,11 @@ public final class AnyCursor<Element>: Cursor { } } +// Explicit non-conformance to Sendable: a type-erased cursor can't be more +// sendable than non-sendable cursors (such as `DatabaseCursor`). +@available(*, unavailable) +extension AnyCursor: Sendable { } + /// A `Cursor` that consumes and drops n elements from an underlying `Base` /// cursor before possibly returning the first available element. public final class DropFirstCursor<Base: Cursor> { @@ -747,6 +752,11 @@ public final class DropFirstCursor<Base: Cursor> { } } +// Explicit non-conformance to Sendable: `DropFirstCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension DropFirstCursor: Sendable { } + extension DropFirstCursor: Cursor { public func next() throws -> Base.Element? { while dropped < limit { @@ -773,6 +783,11 @@ public final class DropWhileCursor<Base: Cursor> { } } +// Explicit non-conformance to Sendable: `DropWhileCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension DropWhileCursor: Sendable { } + extension DropWhileCursor: Cursor { public func next() throws -> Base.Element? { if predicateHasFailed { @@ -818,6 +833,11 @@ public final class EnumeratedCursor<Base: Cursor> { } } +// Explicit non-conformance to Sendable: `EnumeratedCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension EnumeratedCursor: Sendable { } + extension EnumeratedCursor: Cursor { public func next() throws -> (Int, Base.Element)? { guard let element = try base.next() else { return nil } @@ -875,6 +895,11 @@ public final class FlattenCursor<Base: Cursor> where Base.Element: Cursor { } } +// Explicit non-conformance to Sendable: `FlattenCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FlattenCursor: Sendable { } + extension FlattenCursor: Cursor { public func next() throws -> Base.Element.Element? { while true { @@ -901,6 +926,11 @@ public final class MapCursor<Base: Cursor, Element> { } } +// Explicit non-conformance to Sendable: There is no known reason for making +// it thread-safe (`transform` would need to be a Sendable closure). +@available(*, unavailable) +extension MapCursor: Sendable { } + extension MapCursor: Cursor { public func next() throws -> Element? { guard let element = try base.next() else { return nil } @@ -927,6 +957,11 @@ public final class PrefixCursor<Base: Cursor> { } } +// Explicit non-conformance to Sendable: `PrefixCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension PrefixCursor: Sendable { } + extension PrefixCursor: Cursor { public func next() throws -> Base.Element? { if taken >= maxLength { return nil } @@ -954,6 +989,11 @@ public final class PrefixWhileCursor<Base: Cursor> { } } +// Explicit non-conformance to Sendable: `PrefixWhileCursor` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension PrefixWhileCursor: Sendable { } + extension PrefixWhileCursor: Cursor { public func next() throws -> Base.Element?
{ if !predicateHasFailed, let nextElement = try base.next() { diff --git a/GRDB/Core/Database+Schema.swift b/GRDB/Core/Database+Schema.swift index a4350e39f9..878cbe72bf 100644 --- a/GRDB/Core/Database+Schema.swift +++ b/GRDB/Core/Database+Schema.swift @@ -57,6 +57,14 @@ extension Database { case let .attached(name): return "\(name).sqlite_master" } } + + /// The name of the master sqlite table, without the schema name. + var unqualifiedMasterTableName: String { // swiftlint:disable:this inclusive_language + switch self { + case .main, .attached: return "sqlite_master" + case .temp: return "sqlite_temp_master" + } + } } /// The identifier of a database table or view. @@ -146,6 +154,19 @@ extension Database { return schemaIdentifiers } + /// The `SchemaIdentifier` named `schemaName` if it exists. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or + /// if no such schema exists. + private func schemaIdentifier(named schemaName: String) throws -> SchemaIdentifier { + let allIdentifiers = try schemaIdentifiers() + if let result = allIdentifiers.first(where: { $0.sql.lowercased() == schemaName.lowercased() }) { + return result + } else { + throw DatabaseError.noSuchSchema(schemaName) + } + } + #if GRDBCUSTOMSQLITE || GRDBCIPHER /// Returns information about a table or a view func table(_ tableName: String) throws -> TableInfo? { @@ -202,10 +223,19 @@ extension Database { return tableInfo } - /// Returns whether a table exists, in the main or temp schema, or in an - /// attached database. - public func tableExists(_ name: String) throws -> Bool { - try schemaIdentifiers().contains { + /// Returns whether a table exists + /// + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or + /// if the specified schema does not exist + public func tableExists(_ name: String, in schemaName: String? = nil) throws -> Bool { + if let schemaName { + return try exists(type: .table, name: name, in: schemaName) + } + + return try schemaIdentifiers().contains { try exists(type: .table, name: name, in: $0) } } @@ -238,20 +268,57 @@ extension Database { /// Returns whether a view exists, in the main or temp schema, or in an /// attached database. - public func viewExists(_ name: String) throws -> Bool { - try schemaIdentifiers().contains { + /// + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or + /// if the specified schema does not exist + public func viewExists(_ name: String, in schemaName: String? = nil) throws -> Bool { + if let schemaName { + return try exists(type: .view, name: name, in: schemaName) + } + + return try schemaIdentifiers().contains { try exists(type: .view, name: name, in: $0) } } /// Returns whether a trigger exists, in the main or temp schema, or in an /// attached database. - public func triggerExists(_ name: String) throws -> Bool { - try schemaIdentifiers().contains { + /// + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or + /// if the specified schema does not exist + public func triggerExists(_ name: String, in schemaName: String? 
= nil) throws -> Bool { + if let schemaName { + return try exists(type: .trigger, name: name, in: schemaName) + } + + return try schemaIdentifiers().contains { try exists(type: .trigger, name: name, in: $0) } } + /// Checks if an entity exists in a given schema + /// + /// This checks for the existence of the entity specified by + /// `type` and `name`. The schema named `schemaName` is expected to + /// exist; an error is thrown if it cannot be found. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or + /// if the specified schema does not exist + private func exists(type: SchemaObjectType, name: String, in schemaName: String) throws -> Bool { + if let schemaID = try schemaIdentifiers().first(where: { $0.sql.lowercased() == schemaName.lowercased() }) { + return try exists(type: type, name: name, in: schemaID) + } else { + throw DatabaseError.noSuchSchema(schemaName) + } + } + private func exists(type: SchemaObjectType, name: String, in schemaID: SchemaIdentifier) throws -> Bool { // SQLite identifiers are case-insensitive, case-preserving: // http://www.alberton.info/dbms_identifiers_and_case_sensitivity.html @@ -264,10 +331,17 @@ extension Database { /// table has no explicit primary key, the result is the hidden /// "rowid" column. /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or if no - /// such table exists in the main or temp schema, or in an - /// attached database. - public func primaryKey(_ tableName: String) throws -> PrimaryKeyInfo { + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, if + /// the specified schema does not exist, or if no such table exists in + /// the main or temp schema, or in an attached database. + public func primaryKey(_ tableName: String, in schemaName: String? = nil) throws -> PrimaryKeyInfo { + if let schemaName { + return try introspect(tableNamed: tableName, inSchemaNamed: schemaName, using: primaryKey(_:)) + } + for schemaIdentifier in try schemaIdentifiers() { if let result = try primaryKey(TableIdentifier(schemaID: schemaIdentifier, name: tableName)) { return result @@ -354,13 +428,13 @@ // FIXME: We ignore the exception, and consider all INTEGER primary // keys as aliases for the rowid: if pkColumn.type.uppercased() == "INTEGER" { - primaryKey = .rowID(pkColumn.name) + primaryKey = .rowID(pkColumn) } else { - primaryKey = try .regular([pkColumn.name], tableHasRowID: tableHasRowID(table)) + primaryKey = try .regular([pkColumn], tableHasRowID: tableHasRowID(table)) } default: // Multi-columns primary key - primaryKey = try .regular(pkColumns.map(\.name), tableHasRowID: tableHasRowID(table)) + primaryKey = try .regular(pkColumns, tableHasRowID: tableHasRowID(table)) } schemaCache[table.schemaID].set(primaryKey: .value(primaryKey), forTable: table.name) @@ -387,7 +461,7 @@ extension Database { return try self.table(for: table)!.hasRowID } #else - if #available(iOS 15.4, macOS 12.4, tvOS 15.4, watchOS 8.5, *) { + if #available(iOS 15.4, macOS 12.4, tvOS 15.4, watchOS 8.5, *) { // SQLite 3.37+ return try self.table(for: table)!.hasRowID } #endif @@ -426,10 +500,18 @@ extension Database { /// the columns contain the primary key or a unique index, use /// ``table(_:hasUniqueKey:)``.
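The schema-qualified lookups introduced above all follow the same pattern: pass a schema name to scope the search, or omit it to walk schemas in SQLite resolution order. A sketch, where the "player" table and the attached "aux" schema are hypothetical:

```swift
try dbQueue.read { db in
    // Search every known schema, in SQLite resolution order:
    let existsAnywhere = try db.tableExists("player")

    // Scope the lookup; throws DatabaseError.noSuchSchema if "aux" is unknown:
    let existsInAux = try db.tableExists("player", in: "aux")

    // Primary key introspection accepts the same optional schema name:
    let pk = try db.primaryKey("player", in: "main")
    print(existsAnywhere, existsInAux, pk.columns)
}
```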
/// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or if no - /// such table exists in the main or temp schema, or in an - /// attached database. - public func indexes(on tableName: String) throws -> [IndexInfo] { + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, if + /// the specified schema does not exist, or if no such table or view + /// with this name exists in the main or temp schema, or in an attached + /// database. + public func indexes(on tableName: String, in schemaName: String? = nil) throws -> [IndexInfo] { + if let schemaName { + return try introspect(tableNamed: tableName, inSchemaNamed: schemaName, using: indexes(on:)) + } + for schemaIdentifier in try schemaIdentifiers() { if let result = try indexes(on: TableIdentifier(schemaID: schemaIdentifier, name: tableName)) { return result @@ -518,10 +600,18 @@ /// Returns the foreign keys defined on table named `tableName`. /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or if no - /// such table exists in the main or temp schema, or in an - /// attached database. - public func foreignKeys(on tableName: String) throws -> [ForeignKeyInfo] { + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, if + /// the specified schema does not exist, or if no such table or view + /// with this name exists in the main or temp schema, or in an attached + /// database. + public func foreignKeys(on tableName: String, in schemaName: String? = nil) throws -> [ForeignKeyInfo] { + if let schemaName { + return try introspect(tableNamed: tableName, inSchemaNamed: schemaName, using: foreignKeys(on:)) + } + for schemaIdentifier in try schemaIdentifiers() { if let result = try foreignKeys(on: TableIdentifier(schemaID: schemaIdentifier, name: tableName)) { return result @@ -602,10 +692,27 @@ /// Returns a cursor over foreign key violations in the table. /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or if no - /// such table exists in the main or temp schema, or in an - /// attached database. - public func foreignKeyViolations(in tableName: String) throws -> RecordCursor<ForeignKeyViolation> { + /// When `schemaName` is not specified, known schemas are checked in + /// SQLite resolution order and the first matching table is used. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, if + /// the specified schema does not exist, or if no such table or view + /// with this name exists in the main or temp schema, or in an attached + /// database. + public func foreignKeyViolations( + in tableName: String, + in schemaName: String?
= nil) + throws -> RecordCursor<ForeignKeyViolation> + { + if let schemaName { + let schemaID = try schemaIdentifier(named: schemaName) + if try exists(type: .table, name: tableName, in: schemaID) { + return try foreignKeyViolations(in: TableIdentifier(schemaID: schemaID, name: tableName)) + } else { + throw DatabaseError.noSuchTable(tableName) + } + } + for schemaIdentifier in try schemaIdentifiers() { if try exists(type: .table, name: tableName, in: schemaIdentifier) { return try foreignKeyViolations(in: TableIdentifier(schemaID: schemaIdentifier, name: tableName)) @@ -634,14 +741,21 @@ extension Database { /// Throws an error if there exists a foreign key violation in the table. /// + /// When `schemaName` is not specified, known schemas are checked in + /// SQLite resolution order and the first matching table is used. + /// /// On the first foreign key violation found in the table, this method /// throws a ``DatabaseError`` with extended code /// `SQLITE_CONSTRAINT_FOREIGNKEY`. /// /// If you are looking for the list of foreign key violations, prefer - /// ``foreignKeyViolations(in:)`` instead. - public func checkForeignKeys(in tableName: String) throws { - try checkForeignKeys(from: foreignKeyViolations(in: tableName)) + /// ``foreignKeyViolations(in:in:)`` instead. + /// + /// - throws: A ``DatabaseError`` as described above, when the + /// specified schema does not exist, or when no such table or view with + /// this name exists in the main or temp schema or in an attached + /// database. + public func checkForeignKeys(in tableName: String, in schemaName: String? = nil) throws { + try checkForeignKeys(from: foreignKeyViolations(in: tableName, in: schemaName)) } private func checkForeignKeys(from violations: RecordCursor<ForeignKeyViolation>) throws { @@ -658,9 +772,17 @@ extension Database { /// attached database. func canonicalTableName(_ tableName: String) throws -> String? { for schemaIdentifier in try schemaIdentifiers() { + // Regular tables if let result = try schema(schemaIdentifier).canonicalName(tableName, ofType: .table) { return result } + + // Master table (sqlite_master, sqlite_temp_master) + // swiftlint:disable:next inclusive_language + let masterTableName = schemaIdentifier.unqualifiedMasterTableName + if tableName.lowercased() == masterTableName.lowercased() { + return masterTableName + } } return nil } @@ -673,16 +795,44 @@ extension Database { schemaCache[schemaID].schemaInfo = schemaInfo return schemaInfo } + + /// Attempts to perform a table introspection function on a given + /// table and schema + /// + /// - parameter tableName: The name of the table to examine + /// - parameter schemaName: The name of the schema to check + /// - parameter introspector: An introspection function taking a + /// `TableIdentifier` as the only parameter + private func introspect<T>( + tableNamed tableName: String, + inSchemaNamed schemaName: String, + using introspector: (TableIdentifier) throws -> T? + ) throws -> T { + let schemaIdentifier = try schemaIdentifier(named: schemaName) + if let result = try introspector(TableIdentifier(schemaID: schemaIdentifier, name: tableName)) { + return result + } else { + throw DatabaseError.noSuchTable(tableName) + } + } } extension Database { /// Returns the columns in a table or a view. /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or if no - /// such table or view with this name exists in the main or temp schema, or - /// in an attached database.
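Both foreign-key checking styles documented above accept the optional schema name. A sketch with the same hypothetical "player" table; the schema-qualified `indexes(on:in:)` and `foreignKeys(on:in:)` are shown alongside for completeness:

```swift
try dbQueue.read { db in
    // Related introspection, also schema-qualified:
    let indexes = try db.indexes(on: "player", in: "main")
    let foreignKeys = try db.foreignKeys(on: "player", in: "main")
    print(indexes.count, foreignKeys.count)

    // Iterate violations one by one...
    let violations = try db.foreignKeyViolations(in: "player", in: "main")
    while let violation = try violations.next() {
        print(violation) // "FOREIGN KEY constraint violation - from player to ..."
    }

    // ...or throw SQLITE_CONSTRAINT_FOREIGNKEY on the first one found:
    try db.checkForeignKeys(in: "player", in: "main")
}
```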
- public func columns(in tableName: String) throws -> [ColumnInfo] { + /// When `schemaName` is not specified, known schemas are iterated in + /// SQLite resolution order and the first matching result is returned. + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, if + /// the specified schema does not exist, or if no such table or view + /// with this name exists in the main or temp schema, or in an attached + /// database. + public func columns(in tableName: String, in schemaName: String? = nil) throws -> [ColumnInfo] { + if let schemaName { + return try introspect(tableNamed: tableName, inSchemaNamed: schemaName, using: columns(in:)) + } + for schemaIdentifier in try schemaIdentifiers() { if let result = try columns(in: TableIdentifier(schemaID: schemaIdentifier, name: tableName)) { return result @@ -840,14 +990,14 @@ extension Database { /// Information about a column of a database table. /// -/// You get `ColumnInfo` instances with the ``Database/columns(in:)`` +/// You get `ColumnInfo` instances with the ``Database/columns(in:in:)`` /// `Database` method. /// /// Related SQLite documentation: /// /// - [pragma `table_info`](https://www.sqlite.org/pragma.html#pragma_table_info) /// - [pragma `table_xinfo`](https://www.sqlite.org/pragma.html#pragma_table_xinfo) -public struct ColumnInfo: FetchableRecord { +public struct ColumnInfo: FetchableRecord, Sendable { let cid: Int let hidden: Int? @@ -858,8 +1008,22 @@ public struct ColumnInfo: FetchableRecord { /// /// The casing of this string depends on the SQLite version: make sure you /// process this string in a case-insensitive way. + /// + /// The type is the empty string when the column has no declared type. public let type: String + /// The column data type (nil when the column has no declared type). + /// + /// The casing of the raw value depends on the SQLite version: make sure + /// you process the result in a case-insensitive way. + var columnType: Database.ColumnType? { + if type.isEmpty { + return nil + } else { + return Database.ColumnType(rawValue: type) + } + } + /// A boolean value indicating if the column is constrained to be not null. public let isNotNull: Bool @@ -912,16 +1076,16 @@ public struct ColumnInfo: FetchableRecord { /// Information about an index. /// -/// You get `ForeignKeyInfo` instances with the ``Database/indexes(on:)`` +/// You get `IndexInfo` instances with the ``Database/indexes(on:in:)`` /// `Database` method. /// /// Related SQLite documentation: /// /// - [pragma `index_list`](https://www.sqlite.org/pragma.html#pragma_index_list) /// - [pragma `index_info`](https://www.sqlite.org/pragma.html#pragma_index_info) -public struct IndexInfo { +public struct IndexInfo: Sendable { /// The origin of an index. - public struct Origin: RawRepresentable, Equatable, DatabaseValueConvertible { + public struct Origin: RawRepresentable, Equatable, DatabaseValueConvertible, Sendable { public var rawValue: String public init(rawValue: String) { @@ -955,7 +1119,7 @@ public struct IndexInfo { /// /// You get instances of `ForeignKeyViolation` from the `Database` methods /// ``Database/foreignKeyViolations()`` and -/// ``Database/foreignKeyViolations(in:)`` methods. +/// ``Database/foreignKeyViolations(in:in:)`` methods. /// /// For example: /// @@ -994,7 +1158,7 @@ public struct IndexInfo { /// ``` /// /// Related SQLite documentation: -public struct ForeignKeyViolation { +public struct ForeignKeyViolation: Sendable { /// The name of the table that contains the foreign key.
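Sketch of the `columns(in:in:)` call documented above. Per the doc comments, the declared `type` string may be empty and must be processed case-insensitively (the typed `columnType` accessor is internal, so the example sticks to the public `type` string):

```swift
try dbQueue.read { db in
    for column in try db.columns(in: "player", in: "main") {
        let declaredType = column.type.isEmpty ? "(no declared type)" : column.type
        print(column.name, declaredType, "notNull:", column.isNotNull)
    }
}
```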
public var originTable: String @@ -1009,7 +1173,7 @@ public struct ForeignKeyViolation { /// The id of the foreign key constraint that failed. /// /// This id matches the ``ForeignKeyInfo/id`` property in - /// ``ForeignKeyInfo``. See ``Database/foreignKeys(on:)``. + /// ``ForeignKeyInfo``. See ``Database/foreignKeys(on:in:)``. public var foreignKeyId: Int /// A precise description of the foreign key violation. @@ -1032,7 +1196,7 @@ public struct ForeignKeyViolation { }) var description: String - if let foreignKey = foreignKey { + if let foreignKey { description = """ FOREIGN KEY constraint violation - \ from \(originTable)(\(foreignKey.originColumns.joined(separator: ", "))) \ @@ -1042,9 +1206,9 @@ public struct ForeignKeyViolation { description = "FOREIGN KEY constraint violation - from \(originTable) to \(destinationTable)" } - if let originRow = originRow { + if let originRow { description += ", in \(String(describing: originRow))" - } else if let originRowID = originRowID { + } else if let originRowID { description += ", in rowid \(originRowID)" } @@ -1084,7 +1248,7 @@ extension ForeignKeyViolation: CustomStringConvertible { /// /// See also ``failureDescription(_:)``. public var description: String { - if let originRowID = originRowID { + if let originRowID { return """ FOREIGN KEY constraint violation - from \(originTable) to \(destinationTable), \ in rowid \(originRowID) @@ -1099,7 +1263,7 @@ extension ForeignKeyViolation: CustomStringConvertible { /// Information about a primary key. /// -/// You get `PrimaryKeyInfo` instances with the ``Database/primaryKey(_:)`` +/// You get `PrimaryKeyInfo` instances with the ``Database/primaryKey(_:in:)`` /// `Database` method. /// /// When the table's primary key is the rowid: @@ -1143,29 +1307,29 @@ extension ForeignKeyViolation: CustomStringConvertible { /// pk.rowIDColumn // nil /// pk.isRowID // false /// ``` -public struct PrimaryKeyInfo { +public struct PrimaryKeyInfo: Sendable { private enum Impl { /// The hidden rowID. case hiddenRowID /// An INTEGER PRIMARY KEY column that aliases the Row ID. /// Associated string is the column name. - case rowID(String) + case rowID(ColumnInfo) /// Any primary key, but INTEGER PRIMARY KEY. /// Associated strings are column names. - case regular(columns: [String], tableHasRowID: Bool) + case regular(columnInfos: [ColumnInfo], tableHasRowID: Bool) } private let impl: Impl - static func rowID(_ column: String) -> PrimaryKeyInfo { - PrimaryKeyInfo(impl: .rowID(column)) + static func rowID(_ columnInfo: ColumnInfo) -> PrimaryKeyInfo { + PrimaryKeyInfo(impl: .rowID(columnInfo)) } - static func regular(_ columns: [String], tableHasRowID: Bool) -> PrimaryKeyInfo { - assert(!columns.isEmpty) - return PrimaryKeyInfo(impl: .regular(columns: columns, tableHasRowID: tableHasRowID)) + static func regular(_ columnInfos: [ColumnInfo], tableHasRowID: Bool) -> PrimaryKeyInfo { + assert(!columnInfos.isEmpty) + return PrimaryKeyInfo(impl: .regular(columnInfos: columnInfos, tableHasRowID: tableHasRowID)) } static let hiddenRowID = PrimaryKeyInfo(impl: .hiddenRowID) @@ -1175,21 +1339,34 @@ public struct PrimaryKeyInfo { switch impl { case .hiddenRowID: return [Column.rowID.name] - case let .rowID(column): - return [column] - case let .regular(columns: columns, tableHasRowID: _): - return columns + case let .rowID(columnInfo): + return [columnInfo.name] + case let .regular(columnInfos: columnInfos, tableHasRowID: _): + return columnInfos.map(\.name) } } + /// The columns in the primary key. 
Nil if the primary key is the + /// hidden rowID. Never empty otherwise. + var columnInfos: [ColumnInfo]? { + switch impl { + case .hiddenRowID: + return nil + case let .rowID(columnInfo): + return [columnInfo] + case let .regular(columnInfos: columnInfos, tableHasRowID: _): + return columnInfos + } + } + /// When not nil, the name of the column that contains the /// `INTEGER PRIMARY KEY`. public var rowIDColumn: String? { switch impl { case .hiddenRowID: return nil - case .rowID(let column): - return column + case .rowID(let columnInfo): + return columnInfo.name case .regular: return nil } @@ -1216,7 +1393,7 @@ public struct PrimaryKeyInfo { return true case .rowID: return true - case let .regular(columns: _, tableHasRowID: tableHasRowID): + case let .regular(columnInfos: _, tableHasRowID: tableHasRowID): return tableHasRowID } } @@ -1252,11 +1429,11 @@ public struct PrimaryKeyInfo { /// Information about a foreign key. /// -/// You get `ForeignKeyInfo` instances with the ``Database/foreignKeys(on:)`` +/// You get `ForeignKeyInfo` instances with the ``Database/foreignKeys(on:in:)`` /// `Database` method. /// /// Related SQLite documentation: [pragma `foreign_key_list`](https://www.sqlite.org/pragma.html#pragma_foreign_key_list). -public struct ForeignKeyInfo { +public struct ForeignKeyInfo: Sendable { /// The first column in the output of the `foreign_key_list` pragma. public var id: Int @@ -1313,22 +1490,39 @@ struct TableInfo: FetchableRecord { } } -enum SchemaObjectType: String { - case index - case table - case trigger - case view +/// A value in the `type` column of `sqlite_master`. +struct SchemaObjectType: Hashable, RawRepresentable, DatabaseValueConvertible { + var rawValue: String + static let index = SchemaObjectType(rawValue: "index") + static let table = SchemaObjectType(rawValue: "table") + static let trigger = SchemaObjectType(rawValue: "trigger") + static let view = SchemaObjectType(rawValue: "view") +} + +/// A row in `sqlite_master`. +struct SchemaObject: Hashable, FetchableRecord { + var type: SchemaObjectType + var name: String + var tbl_name: String? + var sql: String? + + init(row: Row) throws { + // "rootpage" column is not always there: avoid using numerical indexes + type = row["type"] + name = row["name"] + tbl_name = row["tbl_name"] + sql = row["sql"] + } } /// All objects in a database schema (tables, views, indexes, triggers). struct SchemaInfo: Equatable { - private var objects: Set + let objects: Set /// Returns whether there exists a object of given type with this name /// (case-insensitive). func containsObjectNamed(_ name: String, ofType type: SchemaObjectType) -> Bool { let name = name.lowercased() - let type = type.rawValue return objects.contains { $0.type == type && $0.name.lowercased() == name } @@ -1341,15 +1535,12 @@ struct SchemaInfo: Equatable { func canonicalName(_ name: String, ofType type: SchemaObjectType) -> String? { let name = name.lowercased() return objects - .first { $0.type == type.rawValue && $0.name.lowercased() == name }? + .first { $0.type == type && $0.name.lowercased() == name }? .name } - private struct SchemaObject: Codable, Hashable, FetchableRecord { - var type: String - var name: String - var tbl_name: String? - var sql: String? 
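For orientation, the rows decoded by the new `SchemaObject` record can be inspected with a plain query against `sqlite_master`; a sketch, assuming an open `dbQueue`:

```swift
try dbQueue.read { db in
    // Fetch by column name, not position: as the comment above notes,
    // the "rootpage" column is not always present.
    let rows = try Row.fetchAll(
        db, sql: "SELECT type, name, tbl_name, sql FROM sqlite_master")
    for row in rows {
        let type: String = row["type"] // "table", "index", "trigger", "view"
        let name: String = row["name"]
        print(type, name)
    }
}
```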
+ func filter(_ isIncluded: (SchemaObject) -> Bool) -> Self { + SchemaInfo(objects: objects.filter(isIncluded)) } } diff --git a/GRDB/Core/Database+Statements.swift b/GRDB/Core/Database+Statements.swift index 6a8d52f013..061c5b3646 100644 --- a/GRDB/Core/Database+Statements.swift +++ b/GRDB/Core/Database+Statements.swift @@ -361,6 +361,11 @@ public class SQLStatementCursor { } } +// Explicit non-conformance to Sendable: database cursors must be used from +// a serialized database access dispatch queue. +@available(*, unavailable) +extension SQLStatementCursor: Sendable { } + extension SQLStatementCursor: Cursor { public func next() throws -> Statement? { guard offset < cString.count - 1 /* trailing \0 */ else { @@ -373,7 +378,7 @@ extension SQLStatementCursor: Cursor { let baseAddress = buffer.baseAddress! // never nil because the buffer contains the trailing \0. // Compile next statement - var statementEnd: UnsafePointer? = nil + var statementEnd: UnsafePointer? = nil let statement = try Statement( database: database, statementStart: baseAddress + offset, @@ -406,8 +411,8 @@ extension SQLStatementCursor: Cursor { /// Check that all arguments were consumed: it is a programmer error to /// provide arguments that do not match the statements. private func checkArgumentsAreEmpty() throws { - if let arguments = arguments, - let initialArgumentCount = initialArgumentCount, + if let arguments, + let initialArgumentCount, arguments.values.isEmpty == false { throw DatabaseError( @@ -445,6 +450,8 @@ extension Database { clearSchemaCache() } + checkForAutocommitTransition() + // Database observation: cleanup try observationBroker?.statementDidExecute(statement) } @@ -460,6 +467,8 @@ extension Database { internalStatementCache.remove(statement) publicStatementCache.remove(statement) + checkForAutocommitTransition() + // Extract values that may be modified by the user in their // `TransactionObserver.databaseDidRollback(_:)` implementation // (see below). @@ -481,6 +490,25 @@ extension Database { arguments: arguments, publicStatementArguments: configuration.publicStatementArguments) } + + private func checkForAutocommitTransition() { + if sqlite3_get_autocommit(sqliteConnection) == 0 { + if autocommitState == .on { + // Record transaction date as soon as the connection leaves + // auto-commit mode. + // We grab a result, so that this failure is later reported + // whenever the user calls `Database.transactionDate`. 
+ transactionDateResult = Result { try configuration.transactionClock.now(self) } + } + autocommitState = .off + } else { + if autocommitState == .off { + // Reset transaction date + transactionDateResult = nil + } + autocommitState = .on + } + } } /// A thread-unsafe statement cache @@ -510,7 +538,7 @@ struct StatementCache { let statement = try db.makeStatement(sql: sql, prepFlags: CUnsignedInt(SQLITE_PREPARE_PERSISTENT)) #else let statement: Statement - if #available(iOS 12.0, OSX 10.14, watchOS 5.0, *) { + if #available(iOS 12, macOS 10.14, watchOS 5, *) { // SQLite 3.20+ statement = try db.makeStatement(sql: sql, prepFlags: CUnsignedInt(SQLITE_PREPARE_PERSISTENT)) } else { statement = try db.makeStatement(sql: sql) diff --git a/GRDB/Core/Database.swift b/GRDB/Core/Database.swift index fb0e129c22..762ff7506b 100644 --- a/GRDB/Core/Database.swift +++ b/GRDB/Core/Database.swift @@ -60,15 +60,28 @@ let SQLITE_TRANSIENT = unsafeBitCast(OpaquePointer(bitPattern: -1), to: sqlite3_ /// - ``inSavepoint(_:)`` /// - ``inTransaction(_:_:)`` /// - ``isInsideTransaction`` +/// - ``readOnly(_:)`` /// - ``rollback()`` +/// - ``transactionDate`` /// - ``TransactionCompletion`` /// - ``TransactionKind`` /// +/// ### Printing Database Content +/// +/// - ``dumpContent(format:to:)`` +/// - ``dumpRequest(_:format:to:)`` +/// - ``dumpSchema(to:)`` +/// - ``dumpSQL(_:format:to:)`` +/// - ``dumpTables(_:format:tableHeader:stableOrder:to:)`` +/// - ``DumpFormat`` +/// - ``DumpTableHeaderOptions`` +/// /// ### Database Observation /// /// - ``add(transactionObserver:extent:)`` /// - ``remove(transactionObserver:)`` /// - ``afterNextTransaction(onCommit:onRollback:)`` +/// - ``notifyChanges(in:)`` /// - ``registerAccess(to:)`` /// /// ### Collations @@ -93,6 +106,7 @@ let SQLITE_TRANSIENT = unsafeBitCast(OpaquePointer(bitPattern: -1), to: sqlite3_ /// /// ### Other Database Operations /// +/// - ``add(tokenizer:)`` /// - ``backup(to:pagesPerStep:progress:)`` /// - ``checkpoint(_:on:)`` /// - ``clearSchemaCache()`` @@ -101,6 +115,7 @@ let SQLITE_TRANSIENT = unsafeBitCast(OpaquePointer(bitPattern: -1), to: sqlite3_ /// - ``trace(options:_:)`` /// - ``CheckpointMode`` /// - ``DatabaseBackupProgress`` +/// - ``StorageClass`` /// - ``TraceEvent`` /// - ``TracingOptions`` public final class Database: CustomStringConvertible, CustomDebugStringConvertible { @@ -276,6 +291,60 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib /// This cache is never cleared: we assume journal mode never changes. var journalModeCache: String? + // MARK: - Transaction Date + + enum AutocommitState { + case off + case on + } + + /// The state of the auto-commit mode, as left by the last + /// executed statement. + /// + /// The goal of this property is to detect changes in the auto-commit mode. + /// When you need to know if the database is currently in the auto-commit + /// mode, always prefer ``isInsideTransaction``. + var autocommitState = AutocommitState.on + + /// The date of the current transaction, wrapped in a result that is an + /// error if there was an error grabbing this date when the transaction has + /// started. + /// + /// Invariant: `transactionDateResult` is nil iff connection is not + /// inside a transaction. + var transactionDateResult: Result? + + /// The date of the current transaction. + /// + /// The returned date is constant at any point during a transaction. 
It is + /// set when the database leaves the + /// [autocommit mode](https://www.sqlite.org/c3ref/get_autocommit.html) with + /// a `BEGIN` statement. + /// + /// When the database is not currently in a transaction, a new date is + /// returned on each call. + /// + /// See for an example of usage. + /// + /// The transaction date, by default, is the start date of the current + /// transaction. You can override this default behavior by configuring + /// ``Configuration/transactionClock``. + public var transactionDate: Date { + get throws { + SchedulingWatchdog.preconditionValidQueue(self) + + // Check invariant: `transactionDateResult` is nil iff connection + // is not inside a transaction. + assert(isInsideTransaction || transactionDateResult == nil) + + if let transactionDateResult { + return try transactionDateResult.get() + } else { + return try configuration.transactionClock.now(self) + } + } + } + // MARK: - Private properties /// Support for ``Configuration/busyMode``. @@ -335,10 +404,10 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib _ = sqlite3_close(sqliteConnection) // ignore result code throw DatabaseError(resultCode: code) } - if let sqliteConnection = sqliteConnection { - return sqliteConnection + guard let sqliteConnection else { + throw DatabaseError(resultCode: .SQLITE_INTERNAL) // WTF SQLite? } - throw DatabaseError(resultCode: .SQLITE_INTERNAL) // WTF SQLite? + return sqliteConnection } // MARK: - Database Setup @@ -365,6 +434,41 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib configuration.SQLiteConnectionDidOpen?() } + /// Performs ``Configuration/JournalModeConfiguration/wal``. + func setUpWALMode() throws { + let journalMode = try String.fetchOne(self, sql: "PRAGMA journal_mode = WAL") + guard journalMode == "wal" else { + throw DatabaseError(message: "could not activate WAL Mode at path: \(path)") + } + + // https://www.sqlite.org/pragma.html#pragma_synchronous + // > Many applications choose NORMAL when in WAL mode + try execute(sql: "PRAGMA synchronous = NORMAL") + + // Make sure a non-empty wal file exists. + // + // The presence of the wal file avoids an SQLITE_CANTOPEN (14) + // error when the user opens a pool and reads from it. + // See . + // + // The non-empty wal file avoids an SQLITE_ERROR (1) error + // when the user opens a pool and creates a wal snapshot + // (which happens when starting a ValueObservation). + // See . + let walPath = path + "-wal" + if try FileManager.default.fileExists(atPath: walPath) == false + || (URL(fileURLWithPath: walPath).resourceValues(forKeys: [.fileSizeKey]).fileSize ?? 0) == 0 + { + try inSavepoint { + try execute(sql: """ + CREATE TABLE grdb_issue_102 (id INTEGER PRIMARY KEY); + DROP TABLE grdb_issue_102; + """) + return .commit + } + } + } + private func setupDoubleQuotedStringLiterals() { if configuration.acceptsDoubleQuotedStringLiterals { _enableDoubleQuotedStringLiterals(sqliteConnection) @@ -671,11 +775,38 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib } } - /// Grants read-only access in the wrapped closure. - func readOnly(_ block: () throws -> T) throws -> T { + /// Executes read-only database operations, and returns their result + /// after they have finished executing. + /// + /// Attempts to write throw a ``DatabaseError`` with + /// resultCode `SQLITE_READONLY`. 
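A sketch of the transaction date in practice. The `player` table and its `modifiedDate` column are hypothetical, and the `.custom` clock factory is assumed to ship with the ``Configuration/transactionClock`` API referenced above:

```swift
var config = Configuration()
// Assumption: a custom clock can replace the default one
// (the date the current transaction started).
config.transactionClock = .custom { _ in Date() }

let dbQueue = try DatabaseQueue(path: "/path/to/db.sqlite", configuration: config)
try dbQueue.write { db in
    // Constant for the whole transaction:
    let date = try db.transactionDate
    try db.execute(
        sql: "UPDATE player SET modifiedDate = ?",
        arguments: [date])
}
```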
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.write { db in
+    ///     // Write OK
+    ///     try Player(...).insert(db)
+    ///
+    ///     try db.readOnly {
+    ///         // Read OK
+    ///         let players = try Player.fetchAll(db)
+    ///
+    ///         // Throws SQLITE_READONLY
+    ///         try Player(...).insert(db)
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// This method is reentrant.
+    ///
+    /// - parameter value: A closure that reads from the database.
+    /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or the
+    ///   error thrown by `value`.
+    public func readOnly<T>(_ value: () throws -> T) throws -> T {
         try beginReadOnly()
         return try throwingFirstError(
-            execute: block,
+            execute: value,
             finally: endReadOnly)
     }
@@ -689,6 +820,11 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
 
     /// Reports the database region to ``ValueObservation``.
     ///
+    /// Calling this method does not fetch any database values. It just
+    /// helps optimizing `ValueObservation`. See
+    /// ``ValueObservation/trackingConstantRegion(_:)`` for more
+    /// information, and some examples of usage.
+    ///
     /// For example:
     ///
     /// ```swift
@@ -700,12 +836,9 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
     /// }
     /// ```
     ///
-    /// See ``ValueObservation/trackingConstantRegion(_:)`` for some examples
-    /// of region reporting.
-    ///
-    /// This method has no effect on a ``ValueObservation`` created with an
-    /// explicit list of tracked regions. In the example below, only the
-    /// `player` table is tracked:
+    /// This method has no effect on a `ValueObservation` created with
+    /// ``ValueObservation/tracking(regions:fetch:)``. In the example below,
+    /// only the `player` table is tracked:
     ///
     /// ```swift
     /// // Observes the 'player' table only
@@ -720,6 +853,65 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
         }
     }
 
+    /// Notifies that some changes were performed in the provided
+    /// database region.
+    ///
+    /// This method makes it possible to notify undetected changes, such as
+    /// changes performed by another process, changes performed by
+    /// direct calls to SQLite C functions, or changes to the
+    /// database schema.
+    /// See
+    /// for a detailed list of undetected database modifications.
+    ///
+    /// It triggers active transaction observers (``TransactionObserver``).
+    /// In particular, ``ValueObservation`` that observe the input `region`
+    /// will fetch and notify a fresh value.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.write { db in
+    ///     // Notify observers that some changes were performed in the database
+    ///     try db.notifyChanges(in: .fullDatabase)
+    ///
+    ///     // Notify observers that some changes were performed in the player table
+    ///     try db.notifyChanges(in: Player.all())
+    ///
+    ///     // Equivalent alternative
+    ///     try db.notifyChanges(in: Table("player"))
+    /// }
+    /// ```
+    ///
+    /// This method has no effect when called from a read-only
+    /// database access.
+    ///
+    /// > Caveat: Individual rowids in the input region are ignored.
+ /// > Notifying a change to a specific rowid is the same as notifying a + /// > change in the whole table: + /// > + /// > ```swift + /// > try dbQueue.write { db in + /// > // Equivalent + /// > try db.notifyChanges(in: Player.all()) + /// > try db.notifyChanges(in: Player.filter(id: 1)) + /// > } + /// > ``` + public func notifyChanges(in region: some DatabaseRegionConvertible) throws { + // Don't do anything when read-only, because read-only transactions + // are not notified. We don't want to notify transactions observers + // of changes, and have them wait for a commit notification that + // will never come. + if !isReadOnly, let observationBroker { + let eventKinds = try region + .databaseRegion(self) + // Use canonical table names for case insensitivity of the input. + .canonicalTables(self) + .impactfulEventKinds(self) + + try observationBroker.notifyChanges(withEventsOfKind: eventKinds) + } + } + /// Extends the `region` argument with the database region selected by all /// statements executed by the closure, and all regions explicitly tracked /// with the ``registerAccess(to:)`` method. @@ -804,31 +996,16 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib self.trace = trace if options.isEmpty || trace == nil { - #if os(Linux) - sqlite3_trace(sqliteConnection, nil) - #else sqlite3_trace_v2(sqliteConnection, 0, nil, nil) - #endif return } - // sqlite3_trace_v2 and sqlite3_expanded_sql were introduced in SQLite 3.14.0 - // http://www.sqlite.org/changes.html#version_3_14 - #if os(Linux) - let dbPointer = Unmanaged.passUnretained(self).toOpaque() - sqlite3_trace(sqliteConnection, { (dbPointer, sql) in - guard let sql = sql.map(String.init(cString:)) else { return } - let db = Unmanaged.fromOpaque(dbPointer!).takeUnretainedValue() - db.trace?(Database.TraceEvent.statement(TraceEvent.Statement(impl: .trace_v1(sql)))) - }, dbPointer) - #else let dbPointer = Unmanaged.passUnretained(self).toOpaque() sqlite3_trace_v2(sqliteConnection, CUnsignedInt(bitPattern: options.rawValue), { (mask, dbPointer, p, x) in let db = Unmanaged.fromOpaque(dbPointer!).takeUnretainedValue() db.trace_v2(CInt(bitPattern: mask), p, x, sqlite3_expanded_sql) return SQLITE_OK }, dbPointer) - #endif } // Precondition: configuration.trace != nil @@ -836,7 +1013,7 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib _ mask: CInt, _ p: UnsafeMutableRawPointer?, _ x: UnsafeMutableRawPointer?, - _ sqlite3_expanded_sql: @escaping @convention(c) (OpaquePointer?) -> UnsafeMutablePointer?) + _ sqlite3_expanded_sql: @escaping @convention(c) (OpaquePointer?) -> UnsafeMutablePointer?) 
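Back to `notifyChanges(in:)`: a sketch of pairing it with a `ValueObservation`, assuming a hypothetical `Player` record whose table is written by another process:

```swift
let observation = ValueObservation.tracking { db in
    try Player.fetchAll(db)
}
// Keep the cancellable alive for as long as the observation should last.
let cancellable = observation.start(in: dbQueue) { error in
    print("Observation failed: \(error)")
} onChange: { players in
    print("Fresh players: \(players.count)")
}

// Later, when an out-of-process write is detected (GRDB cannot see
// it by itself), nudge the observers:
try dbQueue.write { db in
    try db.notifyChanges(in: Player.all())
}
```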
 {
         guard let trace else { return }
@@ -844,26 +1021,22 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
         case SQLITE_TRACE_STMT:
             if let sqliteStatement = p, let unexpandedSQL = x {
                 let statement = TraceEvent.Statement(
-                    impl: .trace_v2(
-                        sqliteStatement: OpaquePointer(sqliteStatement),
-                        unexpandedSQL: UnsafePointer(unexpandedSQL.assumingMemoryBound(to: CChar.self)),
-                        sqlite3_expanded_sql: sqlite3_expanded_sql,
-                        publicStatementArguments: configuration.publicStatementArguments))
+                    sqliteStatement: OpaquePointer(sqliteStatement),
+                    unexpandedSQL: UnsafePointer(unexpandedSQL.assumingMemoryBound(to: CChar.self)),
+                    sqlite3_expanded_sql: sqlite3_expanded_sql,
+                    publicStatementArguments: configuration.publicStatementArguments)
                 trace(TraceEvent.statement(statement))
             }
         case SQLITE_TRACE_PROFILE:
             if let sqliteStatement = p, let durationP = x?.assumingMemoryBound(to: Int64.self) {
                 let statement = TraceEvent.Statement(
-                    impl: .trace_v2(
-                        sqliteStatement: OpaquePointer(sqliteStatement),
-                        unexpandedSQL: nil,
-                        sqlite3_expanded_sql: sqlite3_expanded_sql,
-                        publicStatementArguments: configuration.publicStatementArguments))
+                    sqliteStatement: OpaquePointer(sqliteStatement),
+                    unexpandedSQL: nil,
+                    sqlite3_expanded_sql: sqlite3_expanded_sql,
+                    publicStatementArguments: configuration.publicStatementArguments)
                 let duration = TimeInterval(durationP.pointee) / 1.0e9
-                #if !os(Linux)
                 trace(TraceEvent.profile(statement: statement, duration: duration))
-                #endif
             }
         default:
             break
@@ -1084,7 +1257,7 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
     /// For example:
     ///
     /// ```swift
-    /// try dbQueue.writeWithoutTransaction do {
+    /// try dbQueue.writeWithoutTransaction { db in
     ///     try db.inTransaction {
     ///         try db.execute(sql: "INSERT ...")
     ///         return .commit
@@ -1163,7 +1336,7 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
             }
         }
 
-        if let firstError = firstError {
+        if let firstError {
             throw firstError
         }
     }
@@ -1312,7 +1485,7 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
             }
         }
 
-        if let firstError = firstError {
+        if let firstError {
             throw firstError
         }
     }
@@ -1560,6 +1733,11 @@ public final class Database: CustomStringConvertible, CustomDebugStringConvertib
     }
 }
 
+// Explicit non-conformance to Sendable: `Database` must be used from a
+// serialized database access dispatch queue (see `SerializedDatabase`).
+@available(*, unavailable)
+extension Database: Sendable { }
+
 #if SQLITE_HAS_CODEC
 extension Database {
@@ -1685,7 +1863,7 @@ extension Database {
     /// The available checkpoint modes.
     ///
     /// Related SQLite documentation:
-    public enum CheckpointMode: CInt {
+    public enum CheckpointMode: CInt, Sendable {
         /// The `SQLITE_CHECKPOINT_PASSIVE` mode.
         case passive = 0
@@ -1704,7 +1882,7 @@ extension Database {
     /// Related SQLite documentation:
     /// -
     /// -
-    public struct CollationName: RawRepresentable, Hashable {
+    public struct CollationName: RawRepresentable, Hashable, Sendable {
         public let rawValue: String
 
         /// Creates a collation name.
@@ -1741,10 +1919,11 @@ extension Database {
     ///
     /// For more information, see
     /// [Datatypes In SQLite](https://www.sqlite.org/datatype3.html).
-    public struct ColumnType: RawRepresentable, Hashable {
+    public struct ColumnType: RawRepresentable, Hashable, Sendable {
         /// The SQL for the column type (`"TEXT"`, `"BLOB"`, etc.)
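A sketch of the explicit transaction dance documented above, assuming a hypothetical `player` table:

```swift
try dbQueue.writeWithoutTransaction { db in
    try db.inTransaction {
        try db.execute(
            sql: "INSERT INTO player (name) VALUES (?)",
            arguments: ["Arthur"])
        return .commit // or .rollback
    }
}
```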
         public let rawValue: String
 
+        // TODO: GRDB7 make it a failable initializer that returns nil when rawValue is empty (or blank).
         /// Creates an SQL column type.
         public init(rawValue: String) {
             self.rawValue = rawValue
@@ -1753,6 +1932,14 @@ extension Database {
         /// The `TEXT` column type.
         public static let text = ColumnType(rawValue: "TEXT")
 
+        /// The `TEXT` column type, suitable for JSON columns.
+        ///
+        /// SQLite JSON functions and operators are
+        /// [documented](https://www.sqlite.org/json1.html#interface_overview)
+        /// to throw errors if any of their arguments are binary blobs.
+        /// That's the reason why it is recommended to store JSON as text.
+        public static let jsonText = ColumnType(rawValue: "TEXT")
+
         /// The `INTEGER` column type.
         public static let integer = ColumnType(rawValue: "INTEGER")
@@ -1784,7 +1971,7 @@ extension Database {
     /// An SQLite conflict resolution.
     ///
     /// Related SQLite documentation:
-    public enum ConflictResolution: String {
+    public enum ConflictResolution: String, Sendable {
         /// The `ROLLBACK` conflict resolution.
         case rollback = "ROLLBACK"
@@ -1804,7 +1991,7 @@ extension Database {
     /// A foreign key action.
     ///
     /// Related SQLite documentation:
-    public enum ForeignKeyAction: String {
+    public enum ForeignKeyAction: String, Sendable {
         /// The `CASCADE` foreign key action.
         case cascade = "CASCADE"
@@ -1821,13 +2008,39 @@ extension Database {
     /// An error log function that takes an error code and message.
     public typealias LogErrorFunction = (_ resultCode: ResultCode, _ message: String) -> Void
 
+    /// An SQLite storage class.
+    ///
+    /// For more information, see
+    /// [Datatypes In SQLite](https://www.sqlite.org/datatype3.html).
+    public struct StorageClass: RawRepresentable, Hashable, Sendable {
+        /// The SQL for the storage class (`"INTEGER"`, `"REAL"`, etc.)
+        public let rawValue: String
+
+        /// Creates an SQL storage class.
+        public init(rawValue: String) {
+            self.rawValue = rawValue
+        }
+
+        /// The `INTEGER` storage class.
+        public static let integer = StorageClass(rawValue: "INTEGER")
+
+        /// The `REAL` storage class.
+        public static let real = StorageClass(rawValue: "REAL")
+
+        /// The `TEXT` storage class.
+        public static let text = StorageClass(rawValue: "TEXT")
+
+        /// The `BLOB` storage class.
+        public static let blob = StorageClass(rawValue: "BLOB")
+    }
+
     /// An option for the SQLite tracing feature.
     ///
     /// You use `TracingOptions` with the `Database`
     /// ``Database/trace(options:_:)`` method.
     ///
     /// Related SQLite documentation:
-    public struct TracingOptions: OptionSet {
+    public struct TracingOptions: OptionSet, Sendable {
         /// The raw trace event code.
         public let rawValue: CInt
@@ -1841,13 +2054,11 @@ extension Database {
         /// Trace event code: `SQLITE_TRACE_STMT`.
         public static let statement = TracingOptions(rawValue: SQLITE_TRACE_STMT)
 
-        #if !os(Linux)
         /// The option that reports executed statements and the estimated
         /// duration that the statement took to run.
         ///
         /// Trace event code: `SQLITE_TRACE_PROFILE`.
         public static let profile = TracingOptions(rawValue: SQLITE_TRACE_PROFILE)
-        #endif
     }
 
     /// A trace event.
@@ -1858,17 +2069,11 @@ extension Database {
         /// Information about an executed statement.
         public struct Statement: CustomStringConvertible {
-            enum Impl {
-                case trace_v1(String)
-                case trace_v2(
-                    sqliteStatement: SQLiteStatement,
-                    unexpandedSQL: UnsafePointer?,
-                    sqlite3_expanded_sql: @convention(c) (OpaquePointer?)
-> UnsafeMutablePointer?, - publicStatementArguments: Bool) // See Configuration.publicStatementArguments - } - var impl: Impl + var sqliteStatement: SQLiteStatement + var unexpandedSQL: UnsafePointer? + var sqlite3_expanded_sql: @convention(c) (OpaquePointer?) -> UnsafeMutablePointer? + var publicStatementArguments: Bool // See Configuration.publicStatementArguments - #if !os(Linux) /// The executed SQL, where bound parameters are not expanded. /// /// For example: @@ -1876,21 +2081,11 @@ extension Database { /// ```sql /// SELECT * FROM player WHERE email = ? /// ``` - public var sql: String { _sql } - #endif - - var _sql: String { - switch impl { - case .trace_v1: - // Likely a GRDB bug: this api is not supposed to be available - fatalError("Unavailable statement SQL") - - case let .trace_v2(sqliteStatement, unexpandedSQL, _, _): - if let unexpandedSQL = unexpandedSQL { - return String(cString: unexpandedSQL).trimmedSQLStatement - } else { - return String(cString: sqlite3_sql(sqliteStatement)).trimmedSQLStatement - } + public var sql: String { + if let unexpandedSQL { + return String(cString: unexpandedSQL).trimmedSQLStatement + } else { + return String(cString: sqlite3_sql(sqliteStatement)).trimmedSQLStatement } } @@ -1906,30 +2101,18 @@ extension Database { /// information from leaking in unexpected locations, so use this /// property with care. public var expandedSQL: String { - switch impl { - case let .trace_v1(expandedSQL): - return expandedSQL - - case let .trace_v2(sqliteStatement, _, sqlite3_expanded_sql, _): - guard let cString = sqlite3_expanded_sql(sqliteStatement) else { - return "" - } - defer { sqlite3_free(cString) } - return String(cString: cString).trimmedSQLStatement + guard let cString = sqlite3_expanded_sql(sqliteStatement) else { + return "" } + defer { sqlite3_free(cString) } + return String(cString: cString).trimmedSQLStatement } public var description: String { - switch impl { - case let .trace_v1(expandedSQL): + if publicStatementArguments { return expandedSQL - - case let .trace_v2(_, _, _, publicStatementArguments): - if publicStatementArguments { - return expandedSQL - } else { - return _sql - } + } else { + return sql } } } @@ -1990,7 +2173,7 @@ extension Database { /// /// Related SQLite documentation: . @frozen - public enum TransactionCompletion { + public enum TransactionCompletion: Sendable { case commit case rollback } @@ -1998,7 +2181,7 @@ extension Database { /// A transaction kind. /// /// Related SQLite documentation: . - public enum TransactionKind: String { + public enum TransactionKind: String, Sendable { /// The `DEFERRED` transaction kind. case deferred = "DEFERRED" @@ -2031,3 +2214,13 @@ extension Database { } } } + +// Explicit non-conformance to Sendable: a trace event contains transient +// information. +@available(*, unavailable) +extension Database.TraceEvent: Sendable { } + +// Explicit non-conformance to Sendable: a trace event contains transient +// information. +@available(*, unavailable) +extension Database.TraceEvent.Statement: Sendable { } diff --git a/GRDB/Core/DatabaseBackupProgress.swift b/GRDB/Core/DatabaseBackupProgress.swift index 3119dfa5b6..52a4a56d5e 100644 --- a/GRDB/Core/DatabaseBackupProgress.swift +++ b/GRDB/Core/DatabaseBackupProgress.swift @@ -1,7 +1,7 @@ /// Describe the progress of a database backup. /// /// Related SQLite documentation: -public struct DatabaseBackupProgress { +public struct DatabaseBackupProgress: Sendable { /// The number of pages still to be backed up. 
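The flattened `Statement` storage above backs the public tracing API; a sketch of slow-statement logging (the 0.5s threshold is an arbitrary application choice):

```swift
var config = Configuration()
config.prepareDatabase { db in
    db.trace(options: [.statement, .profile]) { event in
        switch event {
        case let .statement(statement):
            print("SQL: \(statement.sql)") // unexpanded, no arguments
        case let .profile(statement, duration):
            if duration > 0.5 {
                print("Slow (\(duration)s): \(statement.sql)")
            }
        }
    }
}
let dbQueue = try DatabaseQueue(path: "/path/to/db.sqlite", configuration: config)
```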
/// /// It is the result of the `sqlite3_backup_remaining` function. diff --git a/GRDB/Core/DatabaseError.swift b/GRDB/Core/DatabaseError.swift index 7fdc9d1ab5..53eb009b37 100644 --- a/GRDB/Core/DatabaseError.swift +++ b/GRDB/Core/DatabaseError.swift @@ -198,7 +198,7 @@ extension ResultCode: CustomStringConvertible { } public var description: String { - if let errorString = errorString { + if let errorString { return "\(rawValue) (\(errorString))" } else { return "\(rawValue)" @@ -216,10 +216,12 @@ extension ResultCode: Sendable { } /// do { /// try player.insert(db) /// } catch let error as DatabaseError { -/// switch error.resultCode { -/// case ResultCode.SQLITE_CONSTRAINT_FOREIGNKEY: +/// print(error) // prints debugging information +/// +/// switch error { +/// case DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY: /// // foreign key constraint error -/// case ResultCode.SQLITE_CONSTRAINT: +/// case DatabaseError.SQLITE_CONSTRAINT: /// // any other constraint error /// default: /// // any other database error @@ -394,6 +396,10 @@ public struct DatabaseError: Error { static func noSuchTable(_ tableName: String) -> Self { DatabaseError(message: "no such table: \(tableName)") } + + static func noSuchSchema(_ schemaName: String) -> Self { + DatabaseError(message: "no such schema: \(schemaName)") + } } extension DatabaseError { @@ -543,13 +549,13 @@ extension DatabaseError: CustomStringConvertible { /// without notice: don't have your application rely on any specific format. public var description: String { var description = "SQLite error \(resultCode.rawValue)" - if let message = message { + if let message { description += ": \(message)" } - if let sql = sql { - description += " - while executing `\(sql)`" + if let sql { + description += " - while executing `\(sql.trimmedSQLStatement)`" } - if publicStatementArguments, let arguments = arguments, !arguments.isEmpty { + if publicStatementArguments, let arguments, !arguments.isEmpty { description += " with arguments \(arguments)" } return description @@ -571,13 +577,13 @@ extension DatabaseError: CustomStringConvertible { /// property with care. public var expandedDescription: String { var description = "SQLite error \(resultCode.rawValue)" - if let message = message { + if let message { description += ": \(message)" } - if let sql = sql { - description += " - while executing `\(sql)`" + if let sql { + description += " - while executing `\(sql.trimmedSQLStatement)`" } - if let arguments = arguments, !arguments.isEmpty { + if let arguments, !arguments.isEmpty { description += " with arguments \(arguments)" } return description @@ -598,7 +604,7 @@ extension DatabaseError: CustomNSError { /// Part of the `CustomNSError` conformance. public var errorUserInfo: [String: Any] { var userInfo = [NSLocalizedDescriptionKey: description] - if let message = message { + if let message { userInfo[NSLocalizedFailureReasonErrorKey] = message } return userInfo diff --git a/GRDB/Core/DatabaseFunction.swift b/GRDB/Core/DatabaseFunction.swift index db3af3ef86..81fe825748 100644 --- a/GRDB/Core/DatabaseFunction.swift +++ b/GRDB/Core/DatabaseFunction.swift @@ -30,9 +30,9 @@ public final class DatabaseFunction: Hashable { /// The name of the SQL function public var name: String { identity.name } private let identity: Identity - let pure: Bool + let isPure: Bool private let kind: Kind - private var eTextRep: CInt { (SQLITE_UTF8 | (pure ? SQLITE_DETERMINISTIC : 0)) } + private var eTextRep: CInt { (SQLITE_UTF8 | (isPure ? 
SQLITE_DETERMINISTIC : 0)) } /// Creates an SQL function. /// @@ -76,7 +76,7 @@ public final class DatabaseFunction: Hashable { function: @escaping ([DatabaseValue]) throws -> (any DatabaseValueConvertible)?) { self.identity = Identity(name: name, nArg: argumentCount.map(CInt.init) ?? -1) - self.pure = pure + self.isPure = pure self.kind = .function{ (argc, argv) in let arguments = (0.. expose ORDER BY and FILTER when we have distinct types for simple functions and aggregates. /// Returns an SQL expression that applies the function. /// /// You can use a `DatabaseFunction` as a regular Swift function. It returns @@ -171,13 +172,23 @@ public final class DatabaseFunction: Hashable { /// ``` public func callAsFunction(_ arguments: any SQLExpressible...) -> SQLExpression { switch kind { - case .aggregate: - return .function(name, arguments.map(\.sqlExpression)) case .function: - return .aggregate(name, arguments.map(\.sqlExpression)) + return .simpleFunction( + name, + arguments.map(\.sqlExpression), + isPure: isPure, + isJSONValue: false) + case .aggregate: + return .aggregateFunction( + name, + arguments.map(\.sqlExpression), + isDistinct: false, + ordering: nil, + filter: nil, + isJSONValue: false) } } - + /// Calls sqlite3_create_function_v2 /// See func install(in db: Database) { diff --git a/GRDB/Core/DatabasePool.swift b/GRDB/Core/DatabasePool.swift index 3f146ceb3e..7dd0525fc2 100644 --- a/GRDB/Core/DatabasePool.swift +++ b/GRDB/Core/DatabasePool.swift @@ -13,6 +13,9 @@ public final class DatabasePool { @LockedBox var databaseSnapshotCount = 0 + /// If Database Suspension is enabled, this array contains the necessary `NotificationCenter` observers. + private var suspensionObservers: [NSObjectProtocol] = [] + // MARK: - Database Information public var configuration: Configuration { @@ -72,31 +75,12 @@ public final class DatabasePool { purpose: "reader.\(readerCount)") }) - // Activate WAL Mode unless readonly + // Set up journal mode unless readonly if !configuration.readonly { - try writer.sync { db in - let journalMode = try String.fetchOne(db, sql: "PRAGMA journal_mode = WAL") - guard journalMode == "wal" else { - throw DatabaseError(message: "could not activate WAL Mode at path: \(path)") - } - - // https://www.sqlite.org/pragma.html#pragma_synchronous - // > Many applications choose NORMAL when in WAL mode - try db.execute(sql: "PRAGMA synchronous = NORMAL") - - if !FileManager.default.fileExists(atPath: path + "-wal") { - // Create the -wal file if it does not exist yet. This - // avoids an SQLITE_CANTOPEN (14) error whenever a user - // opens a pool to an existing non-WAL database, and - // attempts to read from it. - // See https://github.com/groue/GRDB.swift/issues/102 - try db.inSavepoint { - try db.execute(sql: """ - CREATE TABLE grdb_issue_102 (id INTEGER PRIMARY KEY); - DROP TABLE grdb_issue_102; - """) - return .commit - } + switch configuration.journalMode { + case .default, .wal: + try writer.sync { + try $0.setUpWALMode() } } } @@ -113,10 +97,13 @@ public final class DatabasePool { } deinit { + // Remove block-based Notification observers. + suspensionObservers.forEach(NotificationCenter.default.removeObserver(_:)) + // Undo job done in setupMemoryManagement() // // https://developer.apple.com/library/mac/releasenotes/Foundation/RN-Foundation/index.html#10_11Error - // Explicit unregistration is required before OS X 10.11. + // Explicit unregistration is required before macOS 10.11. 
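Stepping back to `DatabaseFunction` and the renamed `isPure` flag (the public initializer keeps its `pure:` label): a sketch of a deterministic scalar function; the function name is illustrative:

```swift
let reverseString = DatabaseFunction("reverseString", argumentCount: 1, pure: true) { dbValues in
    // Pure functions must depend on their arguments only: SQLite is
    // then free to cache and reorder calls.
    guard let string = String.fromDatabaseValue(dbValues[0]) else { return nil }
    return String(string.reversed())
}

var config = Configuration()
config.prepareDatabase { db in
    db.add(function: reverseString)
}
let dbQueue = try DatabaseQueue(configuration: config)

try dbQueue.read { db in
    // In SQL, or via callAsFunction (the simpleFunction expression above):
    let reversed = try String.fetchOne(db, sql: "SELECT reverseString('GRDB')")
    assert(reversed == "BDRG")
}
```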
NotificationCenter.default.removeObserver(self) // Close reader connections before the writer connection. @@ -161,19 +148,22 @@ public final class DatabasePool { } } -// @unchecked because of databaseSnapshotCount and readerPool +// @unchecked because of databaseSnapshotCount, readerPool and suspensionObservers extension DatabasePool: @unchecked Sendable { } extension DatabasePool { // MARK: - Memory management - /// Frees as much memory as possible, by disposing non-essential memory from - /// the writer connection, and closing all reader connections. + /// Frees as much memory as possible, by disposing non-essential memory. /// /// This method is synchronous, and blocks the current thread until all /// database accesses are completed. /// + /// This method closes all read-only connections, unless the + /// ``Configuration/persistentReadOnlyConnections`` configuration flag + /// is set. + /// /// - warning: This method can prevent concurrent reads from executing, /// until it returns. Prefer ``releaseMemoryEventually()`` if you intend /// to keep on using the database while releasing memory. @@ -181,34 +171,50 @@ extension DatabasePool { // Release writer memory writer.sync { $0.releaseMemory() } - // Release readers memory by closing all connections. - // - // We must use a barrier in order to guarantee that memory has been - // freed (reader connections closed) when the method exits, as - // documented. - // - // Without the barrier, connections would only close _eventually_ (after - // their eventual concurrent jobs have completed). - readerPool?.barrier { - readerPool?.removeAll() + if configuration.persistentReadOnlyConnections { + // Keep existing readers + readerPool?.forEach { reader in + reader.sync { $0.releaseMemory() } + } + } else { + // Release readers memory by closing all connections. + // + // We must use a barrier in order to guarantee that memory has been + // freed (reader connections closed) when the method exits, as + // documented. + // + // Without the barrier, connections would only close _eventually_ (after + // their eventual concurrent jobs have completed). + readerPool?.barrier { + readerPool?.removeAll() + } } } - /// Eventually frees as much memory as possible, by disposing non-essential - /// memory from the writer connection, and closing all reader connections. + /// Eventually frees as much memory as possible, by disposing + /// non-essential memory. + /// + /// This method eventually closes all read-only connections, unless the + /// ``Configuration/persistentReadOnlyConnections`` configuration flag + /// is set. /// /// Unlike ``releaseMemory()``, this method does not prevent concurrent /// database accesses when it is executing. But it does not notify when /// non-essential memory has been freed. public func releaseMemoryEventually() { - // Release readers memory by eventually closing all reader connections - // (they will close after their current jobs have completed). - readerPool?.removeAll() + if configuration.persistentReadOnlyConnections { + // Keep existing readers + readerPool?.forEach { reader in + reader.async { $0.releaseMemory() } + } + } else { + // Release readers memory by eventually closing all reader connections + // (they will close after their current jobs have completed). + readerPool?.removeAll() + } // Release writer memory eventually. 
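A sketch of the new reader-retention flag, for apps that free memory under pressure but want to keep their reader connections warm:

```swift
var config = Configuration()
config.persistentReadOnlyConnections = true

let dbPool = try DatabasePool(path: "/path/to/db.sqlite", configuration: config)

// Readers survive this call: they trim caches in place instead of
// being closed and reopened on the next read.
dbPool.releaseMemoryEventually()
```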
- writer.async { db in - db.releaseMemory() - } + writer.async { $0.releaseMemory() } } #if os(iOS) @@ -318,29 +324,21 @@ extension DatabasePool: DatabaseReader { private func setupSuspension() { if configuration.observesSuspensionNotifications { let center = NotificationCenter.default - center.addObserver( - self, - selector: #selector(DatabasePool.suspend(_:)), - name: Database.suspendNotification, - object: nil) - center.addObserver( - self, - selector: #selector(DatabasePool.resume(_:)), - name: Database.resumeNotification, - object: nil) + suspensionObservers.append(center.addObserver( + forName: Database.suspendNotification, + object: nil, + queue: nil, + using: { [weak self] _ in self?.suspend() } + )) + suspensionObservers.append(center.addObserver( + forName: Database.resumeNotification, + object: nil, + queue: nil, + using: { [weak self] _ in self?.resume() } + )) } } - @objc - private func suspend(_ notification: Notification) { - suspend() - } - - @objc - private func resume(_ notification: Notification) { - resume() - } - // MARK: - Reading from Database @_disfavoredOverload // SR-15150 Async overloading in protocol implementation fails @@ -361,7 +359,7 @@ extension DatabasePool: DatabaseReader { public func asyncRead(_ value: @escaping (Result) -> Void) { guard let readerPool else { - value(.failure(DatabaseError(resultCode: .SQLITE_MISUSE, message: "Connection is closed"))) + value(.failure(DatabaseError.connectionIsClosed())) return } @@ -405,7 +403,7 @@ extension DatabasePool: DatabaseReader { public func asyncUnsafeRead(_ value: @escaping (Result) -> Void) { guard let readerPool else { - value(.failure(DatabaseError(resultCode: .SQLITE_MISUSE, message: "Connection is closed"))) + value(.failure(DatabaseError.connectionIsClosed())) return } @@ -608,8 +606,12 @@ extension DatabasePool: DatabaseReader { /// After this method is called, read-only database access methods will use /// new SQLite connections. /// - /// Eventual concurrent read-only accesses are not invalidated: they will + /// Eventual concurrent read-only accesses are not interrupted, and /// proceed until completion. + /// + /// - This method closes all read-only connections, even if the + /// ``Configuration/persistentReadOnlyConnections`` configuration flag + /// is set. public func invalidateReadOnlyConnections() { readerPool?.removeAll() } @@ -640,6 +642,50 @@ extension DatabasePool: DatabaseReader { return readers.first { $0.onValidQueue } } + // MARK: - WAL Snapshot Transactions + + // swiftlint:disable:next line_length +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + /// Returns a long-lived WAL snapshot transaction on a reader connection. + func walSnapshotTransaction() throws -> WALSnapshotTransaction { + guard let readerPool else { + throw DatabaseError.connectionIsClosed() + } + + let (reader, releaseReader) = try readerPool.get() + return try WALSnapshotTransaction(onReader: reader, release: { isInsideTransaction in + // Discard the connection if the transaction could not be + // properly ended. If we'd reuse it, the next read would + // fail because we'd fail starting a read transaction. + releaseReader(isInsideTransaction ? .discard : .reuse) + }) + } + + /// Returns a long-lived WAL snapshot transaction on a reader connection. + /// + /// - important: The `completion` argument is executed in a serial + /// dispatch queue, so make sure you use the transaction asynchronously. 
+ func asyncWALSnapshotTransaction(_ completion: @escaping (Result) -> Void) { + guard let readerPool else { + completion(.failure(DatabaseError.connectionIsClosed())) + return + } + + readerPool.asyncGet { result in + completion(result.flatMap { reader, releaseReader in + Result { + try WALSnapshotTransaction(onReader: reader, release: { isInsideTransaction in + // Discard the connection if the transaction could not be + // properly ended. If we'd reuse it, the next read would + // fail because we'd fail starting a read transaction. + releaseReader(isInsideTransaction ? .discard : .reuse) + }) + } + }) + } + } +#endif + // MARK: - Database Observation public func _add( @@ -833,9 +879,11 @@ extension DatabasePool { /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) /// /// A ``DatabaseError`` of code `SQLITE_ERROR` is thrown if the SQLite - /// database is not in the [WAL mode](https://www.sqlite.org/wal.html), or - /// if this method is called from a database access where a write - /// transaction is open. + /// database is not in the [WAL mode](https://www.sqlite.org/wal.html), + /// or if this method is called from a write transaction, or if the + /// wal file is missing or truncated (size zero). + /// + /// Related SQLite documentation: public func makeSnapshotPool() throws -> DatabaseSnapshotPool { try unsafeReentrantRead { db in try DatabaseSnapshotPool(db) diff --git a/GRDB/Core/DatabasePublishers.swift b/GRDB/Core/DatabasePublishers.swift index 3aa69d9d93..8f8054953b 100644 --- a/GRDB/Core/DatabasePublishers.swift +++ b/GRDB/Core/DatabasePublishers.swift @@ -1,5 +1,5 @@ #if canImport(Combine) /// A namespace for database Combine publishers. -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public enum DatabasePublishers { } #endif diff --git a/GRDB/Core/DatabaseQueue.swift b/GRDB/Core/DatabaseQueue.swift index ddb3bf4943..ae763f694c 100644 --- a/GRDB/Core/DatabaseQueue.swift +++ b/GRDB/Core/DatabaseQueue.swift @@ -7,15 +7,15 @@ import UIKit public final class DatabaseQueue { private let writer: SerializedDatabase + /// If Database Suspension is enabled, this array contains the necessary `NotificationCenter` observers. + private var suspensionObservers: [NSObjectProtocol] = [] + // MARK: - Configuration public var configuration: Configuration { writer.configuration } - /// The path to the database file. - /// - /// The path is `:memory:` for in-memory databases. public var path: String { writer.path } @@ -47,6 +47,18 @@ public final class DatabaseQueue { configuration: configuration, defaultLabel: "GRDB.DatabaseQueue") + // Set up journal mode unless readonly + if !configuration.readonly { + switch configuration.journalMode { + case .default: + break + case .wal: + try writer.sync { + try $0.setUpWALMode() + } + } + } + setupSuspension() // Be a nice iOS citizen, and don't consume too much memory @@ -100,14 +112,20 @@ public final class DatabaseQueue { } deinit { + // Remove block-based Notification observers. + suspensionObservers.forEach(NotificationCenter.default.removeObserver(_:)) + // Undo job done in setupMemoryManagement() // // https://developer.apple.com/library/mac/releasenotes/Foundation/RN-Foundation/index.html#10_11Error - // Explicit unregistration is required before OS X 10.11. + // Explicit unregistration is required before macOS 10.11. 
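A sketch of opting a `DatabaseQueue` into WAL via the new ``Configuration/journalMode`` switch handled in the initializer above:

```swift
var config = Configuration()
config.journalMode = .wal

let dbQueue = try DatabaseQueue(
    path: "/path/to/db.sqlite",
    configuration: config)
// setUpWALMode() has run: journal_mode is WAL, synchronous is NORMAL,
// and a non-empty -wal file exists next to the database.
```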
NotificationCenter.default.removeObserver(self) } } +// @unchecked because of suspensionObservers +extension DatabaseQueue: @unchecked Sendable { } + extension DatabaseQueue { // MARK: - Memory management @@ -189,29 +207,21 @@ extension DatabaseQueue: DatabaseReader { private func setupSuspension() { if configuration.observesSuspensionNotifications { let center = NotificationCenter.default - center.addObserver( - self, - selector: #selector(DatabaseQueue.suspend(_:)), - name: Database.suspendNotification, - object: nil) - center.addObserver( - self, - selector: #selector(DatabaseQueue.resume(_:)), - name: Database.resumeNotification, - object: nil) + suspensionObservers.append(center.addObserver( + forName: Database.suspendNotification, + object: nil, + queue: nil, + using: { [weak self] _ in self?.suspend() } + )) + suspensionObservers.append(center.addObserver( + forName: Database.resumeNotification, + object: nil, + queue: nil, + using: { [weak self] _ in self?.resume() } + )) } } - @objc - private func suspend(_ notification: Notification) { - suspend() - } - - @objc - private func resume(_ notification: Notification) { - resume() - } - // MARK: - Reading from Database @_disfavoredOverload // SR-15150 Async overloading in protocol implementation fails @@ -225,21 +235,22 @@ extension DatabaseQueue: DatabaseReader { public func asyncRead(_ value: @escaping (Result) -> Void) { writer.async { db in + defer { + // Ignore error because we can not notify it. + try? db.commit() + try? db.endReadOnly() + } + do { - // The transaction guarantees snapshot isolation against eventual - // external connection. - try db.beginTransaction(.deferred) + // Enter read-only mode before starting a transaction, so that the + // transaction commit does not trigger database observation. + // See . try db.beginReadOnly() + try db.beginTransaction(.deferred) + value(.success(db)) } catch { value(.failure(error)) - return } - - value(.success(db)) - - // Ignore error because we can not notify it. - try? db.endReadOnly() - try? db.commit() } } @@ -273,20 +284,23 @@ extension DatabaseQueue: DatabaseReader { writer.execute { db in // ... and that no transaction is opened. GRDBPrecondition(!db.isInsideTransaction, "must not be called from inside a transaction.") + + defer { + // Ignore error because we can not notify it. + try? db.commit() + try? db.endReadOnly() + } do { - try db.beginTransaction(.deferred) + // Enter read-only mode before starting a transaction, so that the + // transaction commit does not trigger database observation. + // See . try db.beginReadOnly() + try db.beginTransaction(.deferred) + value(.success(db)) } catch { value(.failure(error)) - return } - - value(.success(db)) - - // Ignore error because we can not notify it. - try? db.endReadOnly() - try? db.commit() } } @@ -415,3 +429,96 @@ extension DatabaseQueue: DatabaseWriter { writer.async(updates) } } + +// MARK: - Temp Copy + +extension DatabaseQueue { + /// Returns a connection to an in-memory copy of the database at `path`. + /// + /// Changes performed on the returned connection do not impact the + /// original database at `path`. + /// + /// The database memory is released when the returned connection + /// is deallocated. 
+ /// + /// For example: + /// + /// ```swift + /// let path = "/path/to/database.sqlite" + /// let dbQueue = try DatabaseQueue.inMemoryCopy(fromPath: path) + /// ``` + public static func inMemoryCopy( + fromPath path: String, + configuration: Configuration = Configuration()) + throws -> DatabaseQueue + { + var sourceConfig = configuration + sourceConfig.readonly = true + let source = try DatabaseQueue(path: path, configuration: sourceConfig) + + var copyConfig = configuration + copyConfig.readonly = false + let result = try DatabaseQueue(configuration: copyConfig) + + try source.backup(to: result) + + if configuration.readonly { + // Result was not opened read-only so that we could perform the + // copy. And SQLITE_OPEN_READONLY has no effect on in-memory + // databases anyway. + // + // So let's simulate read-only with PRAGMA query_only. + try result.inDatabase { db in + try db.beginReadOnly() + } + } + + return result + } + + /// Returns a connection to a private, temporary, on-disk copy of the + /// database at `path`. + /// + /// Changes performed on the returned connection do not impact the + /// original database at `path`. + /// + /// The on-disk copy will be automatically deleted from disk as soon as + /// the returned connection is closed or deallocated. + /// + /// For example: + /// + /// ```swift + /// let path = "/path/to/database.sqlite" + /// let dbQueue = try DatabaseQueue.temporaryCopy(fromPath: path) + /// ``` + public static func temporaryCopy( + fromPath path: String, + configuration: Configuration = Configuration()) + throws -> DatabaseQueue + { + var sourceConfig = configuration + sourceConfig.readonly = true + let source = try DatabaseQueue(path: path, configuration: sourceConfig) + + // + // > If the filename is an empty string, then a private, temporary + // > on-disk database will be created. This private database will be + // > automatically deleted as soon as the database connection + // > is closed. + var copyConfig = configuration + copyConfig.readonly = false + let result = try DatabaseQueue(path: "", configuration: copyConfig) + + try source.backup(to: result) + + if configuration.readonly { + // Result was not opened read-only so that we could perform the + // copy. So let's simulate read-only with PRAGMA query_only. + try result.inDatabase { db in + try db.beginReadOnly() + } + } + + return result + } +} diff --git a/GRDB/Core/DatabaseReader.swift b/GRDB/Core/DatabaseReader.swift index 6bea5c1bb2..5fac0df44e 100644 --- a/GRDB/Core/DatabaseReader.swift +++ b/GRDB/Core/DatabaseReader.swift @@ -17,6 +17,7 @@ import Dispatch /// ### Database Information /// /// - ``configuration`` +/// - ``path`` /// /// ### Reading from the Database /// @@ -32,6 +33,16 @@ import Dispatch /// - ``unsafeReentrantRead(_:)`` /// - ``asyncUnsafeRead(_:)`` /// +/// ### Printing Database Content +/// +/// - ``dumpContent(format:to:)`` +/// - ``dumpRequest(_:format:to:)`` +/// - ``dumpSchema(to:)`` +/// - ``dumpSQL(_:format:to:)`` +/// - ``dumpTables(_:format:tableHeader:stableOrder:to:)`` +/// - ``DumpFormat`` +/// - ``DumpTableHeaderOptions`` +/// /// ### Other Database Operations /// /// - ``backup(to:pagesPerStep:progress:)`` @@ -46,6 +57,12 @@ public protocol DatabaseReader: AnyObject, Sendable { /// The database configuration. var configuration: Configuration { get } + /// The path to the database file. + /// + /// In-memory databases also have a path: + /// see [In-Memory Databases](https://www.sqlite.org/inmemorydb.html). 
+ var path: String { get } + /// Closes the database connection. /// /// - note: You do not have to call this method, and you should not call @@ -441,7 +458,7 @@ extension DatabaseReader { /// - parameter value: A closure which accesses the database. /// - throws: The error thrown by `value`, or any ``DatabaseError`` that /// would happen while establishing the database access. - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func read(_ value: @Sendable @escaping (Database) throws -> T) async throws -> T { try await withUnsafeThrowingContinuation { continuation in asyncRead { result in @@ -487,7 +504,7 @@ extension DatabaseReader { /// - parameter value: A closure which accesses the database. /// - throws: The error thrown by `value`, or any ``DatabaseError`` that /// would happen while establishing the database access. - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func unsafeRead(_ value: @Sendable @escaping (Database) throws -> T) async throws -> T { try await withUnsafeThrowingContinuation { continuation in asyncUnsafeRead { result in @@ -532,7 +549,7 @@ extension DatabaseReader { /// /// - parameter scheduler: A Combine Scheduler. /// - parameter value: A closure which accesses the database. - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func readPublisher( receiveOn scheduler: some Combine.Scheduler = DispatchQueue.main, value: @escaping (Database) throws -> Output) @@ -550,7 +567,7 @@ extension DatabaseReader { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension DatabasePublishers { /// A publisher that reads from the database. /// @@ -569,7 +586,7 @@ extension DatabasePublishers { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Publisher where Failure == Error { fileprivate func eraseToReadPublisher() -> DatabasePublishers.Read { .init(upstream: eraseToAnyPublisher()) @@ -644,6 +661,10 @@ extension AnyDatabaseReader: DatabaseReader { base.configuration } + public var path: String { + base.path + } + public func close() throws { try base.close() } diff --git a/GRDB/Core/DatabaseRegion.swift b/GRDB/Core/DatabaseRegion.swift index 384a9e4141..fddeb77d37 100644 --- a/GRDB/Core/DatabaseRegion.swift +++ b/GRDB/Core/DatabaseRegion.swift @@ -40,7 +40,7 @@ /// /// - ``isModified(byEventsOfKind:)`` /// - ``isModified(by:)`` -public struct DatabaseRegion { +public struct DatabaseRegion: Sendable { private let tableRegions: [CaseInsensitiveIdentifier: TableRegion]? private init(tableRegions: [CaseInsensitiveIdentifier: TableRegion]?) { @@ -169,16 +169,16 @@ public struct DatabaseRegion { // the observed region, we optimize database observation. // // And by canonicalizing table names, we remove views, and help the - // `isModified` methods. + // `isModified` methods. (TODO: is this comment still accurate? + // Isn't it about providing TransactionObserver.observes() with + // real tables names, instead?) try ignoringInternalSQLiteTables().canonicalTables(db) } /// Returns a region only made of actual tables with their canonical names. - /// Canonical names help the `isModified` methods. /// - /// This method removes views (assuming no table exists with the same name - /// as a view). 
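The new `path` protocol requirement allows helpers that are generic over any reader; a small sketch (the size lookup is illustrative and fails for in-memory databases):

```swift
import Foundation

// Accepts DatabaseQueue, DatabasePool, snapshots, AnyDatabaseReader...
func databaseFileSize(of reader: some DatabaseReader) throws -> UInt64? {
    try FileManager.default
        .attributesOfItem(atPath: reader.path)[.size] as? UInt64
}
```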
- private func canonicalTables(_ db: Database) throws -> DatabaseRegion { + /// This method removes views. + func canonicalTables(_ db: Database) throws -> DatabaseRegion { guard let tableRegions else { return .fullDatabase } var region = DatabaseRegion() for (table, tableRegion) in tableRegions { @@ -233,6 +233,44 @@ extension DatabaseRegion { } return tableRegion.contains(rowID: event.rowID) } + + /// Returns an array of all event kinds that can impact this region. + /// + /// - precondition: the region is canonical. + func impactfulEventKinds(_ db: Database) throws -> [DatabaseEventKind] { + if let tableRegions { + return try tableRegions.flatMap { (table, tableRegion) -> [DatabaseEventKind] in + let tableName = table.rawValue // canonical table name + let columnNames: Set + if let columns = tableRegion.columns { + columnNames = Set(columns.map(\.rawValue)) + } else { + columnNames = try Set(db.columns(in: tableName).map(\.name)) + } + + return [ + DatabaseEventKind.delete(tableName: tableName), + DatabaseEventKind.insert(tableName: tableName), + DatabaseEventKind.update(tableName: tableName, columnNames: columnNames), + ] + } + } else { + // full database + return try db.schemaIdentifiers().flatMap { schemaIdentifier in + let schema = try db.schema(schemaIdentifier) + return try schema.objects + .filter { $0.type == .table } + .flatMap { table in + let columnNames = try Set(db.columns(in: table.name).map(\.name)) + return [ + DatabaseEventKind.delete(tableName: table.name), + DatabaseEventKind.insert(tableName: table.name), + DatabaseEventKind.update(tableName: table.name, columnNames: columnNames), + ] + } + } + } + } } extension DatabaseRegion: Equatable { @@ -287,8 +325,8 @@ private struct TableRegion: Equatable { var rowIds: Set? // nil means "all rowids" var isEmpty: Bool { - if let columns = columns, columns.isEmpty { return true } - if let rowIds = rowIds, rowIds.isEmpty { return true } + if let columns, columns.isEmpty { return true } + if let rowIds, rowIds.isEmpty { return true } return false } diff --git a/GRDB/Core/DatabaseRegionObservation.swift b/GRDB/Core/DatabaseRegionObservation.swift index eb95739af4..4f1e1473c0 100644 --- a/GRDB/Core/DatabaseRegionObservation.swift +++ b/GRDB/Core/DatabaseRegionObservation.swift @@ -3,109 +3,6 @@ import Combine #endif import Foundation -/// `DatabaseRegionObservation` tracks transactions that modify a -/// database region. -/// -/// ## Overview -/// -/// Tracked changes are insertions, updates, and deletions that impact the -/// tracked region, whether performed with raw SQL, or . -/// This includes indirect changes triggered by -/// [foreign keys actions](https://www.sqlite.org/foreignkeys.html#fk_actions) -/// or [SQL triggers](https://www.sqlite.org/lang_createtrigger.html). -/// -/// Changes to internal system tables (such as `sqlite_master`) and changes to -/// [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables are -/// not notified. -/// -/// `DatabaseRegionObservation` calls your application right after changes have -/// been committed in the database, and before any other thread had any -/// opportunity to perform further changes. *This is a pretty strong guarantee, -/// that most applications do not really need.* Instead, most applications -/// prefer to be notified with fresh values: make sure you check -/// ``ValueObservation`` before using `DatabaseRegionObservation`. 
-/// -/// ## DatabaseRegionObservation Usage -/// -/// Create a `DatabaseRegionObservation` with one or several requests to track: -/// -/// ```swift -/// // Tracks the full player table -/// let observation = DatabaseRegionObservation(tracking: Player.all()) -/// ``` -/// -/// Then start the observation from a ``DatabaseQueue`` or ``DatabasePool``: -/// -/// ```swift -/// let cancellable = try observation.start(in: dbQueue) { error in -/// // Handle error -/// } onChange: { (db: Database) in -/// print("Players were changed") -/// } -/// ``` -/// -/// Enjoy the changes notifications: -/// -/// ```swift -/// try dbQueue.write { db in -/// try Player(name: "Arthur").insert(db) -/// } -/// // Prints "Players were changed" -/// ``` -/// -/// You stop the observation by calling the ``DatabaseCancellable/cancel()`` -/// method on the object returned by the `start` method. Cancellation is -/// automatic when the cancellable is deallocated: -/// -/// ```swift -/// cancellable.cancel() -/// ``` -/// -/// `DatabaseRegionObservation` can also be turned into a Combine publisher, or -/// an RxSwift observable (see the companion library -/// [RxGRDB](https://github.com/RxSwiftCommunity/RxGRDB)): -/// -/// ```swift -/// let cancellable = observation.publisher(in: dbQueue).sink { completion in -/// // Handle completion -/// } receiveValue: { (db: Database) in -/// print("Players were changed") -/// } -/// ``` -/// -/// You can feed `DatabaseRegionObservation` with any type that conforms to -/// the ``DatabaseRegionConvertible`` protocol: ``FetchRequest``, -/// ``DatabaseRegion``, ``Table``, etc. For example: -/// -/// ```swift -/// // Observe the score column of the 'player' table -/// let observation = DatabaseRegionObservation( -/// tracking: Player.select(Column("score"))) -/// -/// // Observe the 'score' column of the 'player' table -/// let observation = DatabaseRegionObservation( -/// tracking: SQLRequest("SELECT score FROM player")) -/// -/// // Observe both the 'player' and 'team' tables -/// let observation = DatabaseRegionObservation( -/// tracking: Table("player"), Table("team")) -/// -/// // Observe the full database -/// let observation = DatabaseRegionObservation( -/// tracking: .fullDatabase) -/// ``` -/// -/// ## Topics -/// -/// ### Creating DatabaseRegionObservation -/// -/// - ``init(tracking:)-5ldbe`` -/// - ``init(tracking:)-2nqjd`` -/// -/// ### Observing Database Transactions -/// -/// - ``publisher(in:)`` -/// - ``start(in:onError:onChange:)`` public struct DatabaseRegionObservation { /// A closure that is evaluated when the observation starts, and returns /// the observed database region. @@ -231,7 +128,7 @@ extension DatabaseRegionObservation { } #if canImport(Combine) -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension DatabaseRegionObservation { // MARK: - Publishing Impactful Transactions @@ -243,7 +140,7 @@ extension DatabaseRegionObservation { /// /// Do not reschedule the publisher with `receive(on:options:)` or any /// `Publisher` method that schedules publisher elements. 
- @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func publisher(in writer: some DatabaseWriter) -> DatabasePublishers.DatabaseRegion { DatabasePublishers.DatabaseRegion(self, in: writer) } @@ -264,6 +161,11 @@ private class DatabaseRegionObserver: TransactionObserver { region.isModified(byEventsOfKind: eventKind) } + func databaseDidChange() { + isChanged = true + stopObservingDatabaseChangesUntilNextTransaction() + } + func databaseDidChange(with event: DatabaseEvent) { if region.isModified(by: event) { isChanged = true @@ -284,7 +186,7 @@ private class DatabaseRegionObserver: TransactionObserver { } #if canImport(Combine) -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension DatabasePublishers { /// A publisher that tracks transactions that modify a database region. /// diff --git a/GRDB/Core/DatabaseSnapshot.swift b/GRDB/Core/DatabaseSnapshot.swift index 984ab5c63c..1381c1871c 100644 --- a/GRDB/Core/DatabaseSnapshot.swift +++ b/GRDB/Core/DatabaseSnapshot.swift @@ -59,10 +59,14 @@ import Dispatch /// let snapshot2 = try dbPool.makeSnapshot() /// /// // Guaranteed to be zero -/// let count1 = try snapshot1.read(Player.fetchCount) +/// let count1 = try snapshot1.read { db in +/// try Player.fetchCount(db) +/// } /// /// // Could be anything -/// let count2 = try snapshot2.read(Player.fetchCount) +/// let count2 = try snapshot2.read { db in +/// try Player.fetchCount(db) +/// } /// ``` /// /// `DatabaseSnapshot` inherits its database access methods from the diff --git a/GRDB/Core/DatabaseSnapshotPool.swift b/GRDB/Core/DatabaseSnapshotPool.swift index dcfbd4cb8b..c32b0b1a26 100644 --- a/GRDB/Core/DatabaseSnapshotPool.swift +++ b/GRDB/Core/DatabaseSnapshotPool.swift @@ -104,16 +104,16 @@ public final class DatabaseSnapshotPool { /// /// // Later... Maybe some players have been created. /// // The snapshot is guaranteed to see an empty table of players, though: - /// let count = try snapshot.read(Player.fetchCount) + /// let count = try snapshot.read { db in + /// try Player.fetchCount(db) + /// } /// assert(count == 0) /// ``` /// - /// If any of the following statements are false when the snapshot is - /// created, a ``DatabaseError`` of code `SQLITE_ERROR` is thrown: - /// - /// - The database connection must be in the - /// [WAL mode](https://www.sqlite.org/wal.html). - /// - There must not be a write transaction open. + /// A ``DatabaseError`` of code `SQLITE_ERROR` is thrown if the SQLite + /// database is not in the [WAL mode](https://www.sqlite.org/wal.html), + /// or if this method is called from a write transaction, or if the + /// wal file is missing or truncated (size zero). /// /// Related SQLite documentation: /// @@ -175,9 +175,9 @@ public final class DatabaseSnapshotPool { /// let snapshot = try DatabaseSnapshotPool(path: "/path/to/database.sqlite") /// ``` /// - /// If the database at `path` is not in the - /// [WAL mode](https://www.sqlite.org/wal.html), a ``DatabaseError`` of code - /// `SQLITE_ERROR` is thrown. + /// A ``DatabaseError`` of code `SQLITE_ERROR` is thrown if the SQLite + /// database is not in the [WAL mode](https://www.sqlite.org/wal.html), + /// or if the wal file is missing or truncated (size zero). 
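The `databaseDidChange()` hook added to `DatabaseRegionObserver` above pairs with `stopObservingDatabaseChangesUntilNextTransaction()`. A sketch of the same pattern in a custom observer (the `PlayerObserver` type is hypothetical):

```swift
import GRDB

final class PlayerObserver: TransactionObserver {
    private var playersChanged = false

    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool {
        // Only changes to the player table are interesting.
        eventKind.tableName == "player"
    }

    func databaseDidChange() {
        playersChanged = true
        stopObservingDatabaseChangesUntilNextTransaction()
    }

    func databaseDidChange(with event: DatabaseEvent) {
        // One relevant event is enough: stop paying the per-event cost
        // until the current transaction completes.
        playersChanged = true
        stopObservingDatabaseChangesUntilNextTransaction()
    }

    func databaseDidCommit(_ db: Database) {
        if playersChanged {
            playersChanged = false
            print("Players were changed")
        }
    }

    func databaseDidRollback(_ db: Database) {
        playersChanged = false
    }
}
```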
/// /// Related SQLite documentation: /// @@ -294,7 +294,7 @@ extension DatabaseSnapshotPool: DatabaseSnapshotReader { public func asyncRead(_ value: @escaping (Result) -> Void) { guard let readerPool else { - value(.failure(DatabaseError(resultCode: .SQLITE_MISUSE, message: "Connection is closed"))) + value(.failure(DatabaseError.connectionIsClosed())) return } @@ -322,7 +322,7 @@ extension DatabaseSnapshotPool: DatabaseSnapshotReader { return result } } else { - /// There is no unsafe access to a snapshot. + // There is no unsafe access to a snapshot. return try read(value) } } diff --git a/GRDB/Core/DatabaseValue.swift b/GRDB/Core/DatabaseValue.swift index 2ab1226724..574ea97916 100644 --- a/GRDB/Core/DatabaseValue.swift +++ b/GRDB/Core/DatabaseValue.swift @@ -44,6 +44,7 @@ import Foundation /// ### Creating a DatabaseValue /// /// - ``init(value:)`` +/// - ``init(sqliteStatement:index:)`` /// - ``null`` /// /// ### Accessing the SQLite storage @@ -149,7 +150,7 @@ public struct DatabaseValue: Hashable { } /// Creates a `DatabaseValue` initialized from a raw SQLite statement pointer. - init(sqliteStatement: SQLiteStatement, index: CInt) { + public init(sqliteStatement: SQLiteStatement, index: CInt) { switch sqlite3_column_type(sqliteStatement, index) { case SQLITE_NULL: storage = .null @@ -188,6 +189,34 @@ extension DatabaseValue: StatementBinding { return data.bind(to: sqliteStatement, at: index) } } + + /// Calls the given closure after binding a statement argument. + /// + /// The binding is valid only during the execution of this method. + /// + /// - parameter sqliteStatement: An SQLite statement. + /// - parameter index: 1-based index to statement arguments. + /// - parameter body: The closure to execute when argument is bound. + func withBinding(to sqliteStatement: SQLiteStatement, at index: CInt, do body: () throws -> T) throws -> T { + switch storage { + case .null: + let code = sqlite3_bind_null(sqliteStatement, index) + try checkBindingSuccess(code: code, sqliteStatement: sqliteStatement) + return try body() + case .int64(let int64): + let code = int64.bind(to: sqliteStatement, at: index) + try checkBindingSuccess(code: code, sqliteStatement: sqliteStatement) + return try body() + case .double(let double): + let code = double.bind(to: sqliteStatement, at: index) + try checkBindingSuccess(code: code, sqliteStatement: sqliteStatement) + return try body() + case .string(let string): + return try string.withBinding(to: sqliteStatement, at: index, do: body) + case .blob(let data): + return try data.withBinding(to: sqliteStatement, at: index, do: body) + } + } } extension DatabaseValue: Sendable { } diff --git a/GRDB/Core/DatabaseValueConvertible.swift b/GRDB/Core/DatabaseValueConvertible.swift index 44afac4394..df9b1b6939 100644 --- a/GRDB/Core/DatabaseValueConvertible.swift +++ b/GRDB/Core/DatabaseValueConvertible.swift @@ -1,49 +1,40 @@ -/// A type that can convert itself into and out of a database value. -/// -/// `DatabaseValueConvertible` is adopted by `Bool`, `Int`, `String`, -/// `Date`, etc. -/// -/// To conform to `DatabaseValueConvertible`, provide custom implementations -/// for ``fromDatabaseValue(_:)-21zzv`` and ``databaseValue-1ob9k``. These -/// implementations are ready-made for `RawRepresentable` types whose -/// `RawValue` is `StatementColumnConvertible`, and for `Codable` types. 
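The ready-made conformances mentioned above take a single declaration. A sketch, assuming a hypothetical `Grade` enum (its `String` raw value is `StatementColumnConvertible`):

```swift
import GRDB

// Stored in the database as its raw string value.
enum Grade: String, DatabaseValueConvertible {
    case rookie, veteran
}

// Usage, inside some database access:
// let grade = try Grade.fetchOne(db, sql: "SELECT grade FROM player WHERE id = 1")
```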
-/// -/// ## Topics -/// -/// ### Creating a Value -/// -/// - ``fromDatabaseValue(_:)-21zzv`` -/// - ``fromMissingColumn()-7iamp`` -/// -/// ### Accessing the DatabaseValue -/// -/// - ``databaseValue-1ob9k`` -/// -/// ### Fetching Values from Raw SQL -/// -/// - ``fetchCursor(_:sql:arguments:adapter:)-6elcz`` -/// - ``fetchAll(_:sql:arguments:adapter:)-1cqyb`` -/// - ``fetchSet(_:sql:arguments:adapter:)-5jene`` -/// - ``fetchOne(_:sql:arguments:adapter:)-qvqp`` -/// -/// ### Fetching Values from a Prepared Statement -/// -/// - ``fetchCursor(_:arguments:adapter:)-4l6af`` -/// - ``fetchAll(_:arguments:adapter:)-3abuc`` -/// - ``fetchSet(_:arguments:adapter:)-6y54n`` -/// - ``fetchOne(_:arguments:adapter:)-3d7ax`` -/// -/// ### Fetching Values from a Request -/// -/// - ``fetchCursor(_:_:)-8q4r6`` -/// - ``fetchAll(_:_:)-9hkqs`` -/// - ``fetchSet(_:_:)-1foke`` -/// - ``fetchOne(_:_:)-o6yj`` -/// -/// ### Supporting Types -/// -/// - ``DatabaseValueCursor`` -/// - ``StatementBinding`` +import Foundation + +// Standard collections `Array`, `Set`, and `Dictionary` do not conform to +// `DatabaseValueConvertible`, on purpose. +// +// Adding `DatabaseValueConvertible` conformance to those collection types +// would litter JSON values in unexpected places, and foster misuse. For +// example, it is better when the code below *does not compile*: +// +// ```swift +// // MISUSE: if Array would conform to DatabaseValueConvertible, this +// // code would compile, and run the incorrect SQLite query +// // `SELECT ... WHERE id IN ('[1,2,3]')`, instead of the expected +// // `SELECT ... WHERE id IN (1, 2, 3)`. +// let ids = [1, 2, 3] +// let players = try Player.fetchAll(db, sql: """ +// SELECT * FROM player WHERE id IN (?) +// """, arguments: [ids]) +// ``` +// +// Correct and fostered versions of the code above are: +// +// ```swift +// // CORRECT (explicit SQLite arguments): +// let ids = [1, 2, 3] +// let questionMarks = databaseQuestionMarks(count: ids.count) // "?,?,?" +// let players = try Player.fetchAll(db, sql: """ +// SELECT * FROM player WHERE id IN (\(questionMarks)) +// """, arguments: StatementArguments(ids)) +// +// // CORRECT (SQL interpolation): +// let ids = [1, 2, 3] +// let request: SQLRequest = """ +// SELECT * FROM player WHERE id IN \(ids) +// """ +// let players = try request.fetchAll(db) +// ``` public protocol DatabaseValueConvertible: SQLExpressible, StatementBinding { /// A database value. var databaseValue: DatabaseValue { get } @@ -70,6 +61,18 @@ public protocol DatabaseValueConvertible: SQLExpressible, StatementBinding { /// /// - returns: A decoded value, or, if decoding is impossible, nil. static func fromMissingColumn() -> Self? + + /// Returns the `JSONDecoder` that decodes the value. + /// + /// This method is dedicated to ``DatabaseValueConvertible`` types that + /// also conform to the standard `Decodable` protocol. + static func databaseJSONDecoder() -> JSONDecoder + + /// Returns the `JSONEncoder` that encodes the value. + /// + /// This method is dedicated to ``DatabaseValueConvertible`` types that + /// also conform to the standard `Encodable` protocol. + static func databaseJSONEncoder() -> JSONEncoder } extension DatabaseValueConvertible { @@ -86,6 +89,41 @@ extension DatabaseValueConvertible { public static func fromMissingColumn() -> Self? { nil // failure. } + + /// Returns the `JSONDecoder` that decodes the value. 
+ /// + /// The default implementation returns a `JSONDecoder` with the + /// following properties: + /// + /// - `dataDecodingStrategy`: `.base64` + /// - `dateDecodingStrategy`: `.millisecondsSince1970` + /// - `nonConformingFloatDecodingStrategy`: `.throw` + public static func databaseJSONDecoder() -> JSONDecoder { + let decoder = JSONDecoder() + decoder.dataDecodingStrategy = .base64 + decoder.dateDecodingStrategy = .millisecondsSince1970 + decoder.nonConformingFloatDecodingStrategy = .throw + return decoder + } + + /// Returns the `JSONEncoder` that encodes the value. + /// + /// The default implementation returns a `JSONEncoder` with the + /// following properties: + /// + /// - `dataEncodingStrategy`: `.base64` + /// - `dateEncodingStrategy`: `.millisecondsSince1970` + /// - `nonConformingFloatEncodingStrategy`: `.throw` + /// - `outputFormatting`: `.sortedKeys` + public static func databaseJSONEncoder() -> JSONEncoder { + let encoder = JSONEncoder() + encoder.dataEncodingStrategy = .base64 + encoder.dateEncodingStrategy = .millisecondsSince1970 + encoder.nonConformingFloatEncodingStrategy = .throw + // guarantee some stability in order to ease value comparison + encoder.outputFormatting = .sortedKeys + return encoder + } } // MARK: - Conversions @@ -159,7 +197,7 @@ public final class DatabaseValueCursor: Databas init(statement: Statement, arguments: StatementArguments? = nil, adapter: (any RowAdapter)? = nil) throws { self._statement = statement - if let adapter = adapter { + if let adapter { // adapter may redefine the index of the leftmost column columnIndex = try CInt(adapter.baseColumnIndex(atIndex: 0, layout: statement)) } else { @@ -167,7 +205,7 @@ public final class DatabaseValueCursor: Databas } // Assume cursor is created for immediate iteration: reset and set arguments - try statement.reset(withArguments: arguments) + try statement.prepareExecution(withArguments: arguments) } deinit { @@ -184,6 +222,11 @@ public final class DatabaseValueCursor: Databas } } +// Explicit non-conformance to Sendable: database cursors must be used from +// a serialized database access dispatch queue. +@available(*, unavailable) +extension DatabaseValueCursor: Sendable { } + /// DatabaseValueConvertible comes with built-in methods that allow to fetch /// cursors, arrays, or single values: /// diff --git a/GRDB/Core/DatabaseWriter.swift b/GRDB/Core/DatabaseWriter.swift index a1ada352c7..b4de8f6fd8 100644 --- a/GRDB/Core/DatabaseWriter.swift +++ b/GRDB/Core/DatabaseWriter.swift @@ -289,14 +289,13 @@ public protocol DatabaseWriter: DatabaseReader { /// ``` /// /// - note: Usage of this method is discouraged, because waiting on the - /// returned ``DatabaseFuture`` blocks a thread. You may prefer the - /// asynchronous version of this method: ``spawnConcurrentRead(_:)``. + /// returned ``DatabaseFuture`` blocks a thread. You may prefer + /// ``spawnConcurrentRead(_:)`` instead. /// - parameter value: A closure which accesses the database. func concurrentRead(_ value: @escaping (Database) throws -> T) -> DatabaseFuture // Exposed for RxGRDB and GRBCombine. Naming is not stabilized. - /// Schedules read-only database operations for execution, and - /// returns immediately. + /// Schedules read-only database operations for execution. /// /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) /// @@ -311,7 +310,7 @@ public protocol DatabaseWriter: DatabaseReader { /// by the database writer. 
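The `databaseJSONDecoder()` and `databaseJSONEncoder()` defaults shown earlier can be overridden per conforming type. A sketch, assuming a hypothetical Codable `Address` type; ISO8601 dates are an arbitrary illustrative choice:

```swift
import Foundation
import GRDB

struct Address: Codable, DatabaseValueConvertible {
    var street: String
    var movedInAt: Date
}

extension Address {
    // Encode dates inside the JSON value as ISO8601 strings instead
    // of the default millisecondsSince1970 numbers.
    static func databaseJSONDecoder() -> JSONDecoder {
        let decoder = JSONDecoder()
        decoder.dateDecodingStrategy = .iso8601
        return decoder
    }

    static func databaseJSONEncoder() -> JSONEncoder {
        let encoder = JSONEncoder()
        encoder.dateEncodingStrategy = .iso8601
        encoder.outputFormatting = .sortedKeys // stable output, as in the default
        return encoder
    }
}
```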
/// /// In the example below, the number of players is fetched concurrently with - /// the player insertion. Yet the future is guaranteed to return zero: + /// the player insertion. Yet it is guaranteed to return zero: /// /// ```swift /// try writer.writeWithoutTransaction { db in @@ -334,6 +333,10 @@ public protocol DatabaseWriter: DatabaseReader { /// } /// ``` /// + /// - important: The database operations are executed immediately, + /// or asynchronously, depending on the actual class + /// of `DatabaseWriter`. + /// /// - parameter value: A closure which accesses the database. Its argument /// is a `Result` that provides the database connection, or the failure /// that would prevent establishing the read access to the database. @@ -442,10 +445,21 @@ extension DatabaseWriter { // MARK: - Transaction Observers - /// Adds a transaction observer, so that it gets notified of - /// database changes and transactions. + /// Adds a transaction observer to the writer connection, so that it + /// gets notified of database changes and transactions. + /// + /// This method waits until all currently executing database accesses + /// performed by the writer dispatch queue finish executing. + /// At that point, database observation begins. + /// + /// It has no effect on read-only database connections. /// - /// This method has no effect on read-only database connections. + /// For example: + /// + /// ```swift + /// let myObserver = MyObserver() + /// try dbQueue.add(transactionObserver: myObserver) + /// ``` /// /// - parameter transactionObserver: A transaction observer. /// - parameter extent: The duration of the observation. The default is @@ -458,14 +472,25 @@ extension DatabaseWriter { writeWithoutTransaction { $0.add(transactionObserver: transactionObserver, extent: extent) } } - /// Removes a transaction observer. + /// Removes a transaction observer from the writer connection. + /// + /// This method waits until all currently executing database accesses + /// performed by the writer dispatch queue finish executing. + /// At that point, database observation stops. + /// + /// For example: + /// + /// ```swift + /// let myObserver = MyObserver() + /// try dbQueue.remove(transactionObserver: myObserver) + /// ``` public func remove(transactionObserver: some TransactionObserver) { writeWithoutTransaction { $0.remove(transactionObserver: transactionObserver) } } // MARK: - Erasing the content of the database - /// Erases the content of the database. + /// Erase the database: delete all content, drop all tables, etc. @_disfavoredOverload // SR-15150 Async overloading in protocol implementation fails public func erase() throws { try barrierWriteWithoutTransaction { try $0.erase() } @@ -516,7 +541,7 @@ extension DatabaseWriter { /// /// - Parameter filePath: file path for new database @_disfavoredOverload // SR-15150 Async overloading in protocol implementation fails - @available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) + @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) public func vacuum(into filePath: String) throws { try writeWithoutTransaction { try $0.execute(sql: "VACUUM INTO ?", arguments: [filePath]) @@ -582,7 +607,7 @@ extension DatabaseWriter { /// - throws: The error thrown by `updates`, or any ``DatabaseError`` that /// would happen while establishing the database access or committing /// the transaction. 
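A minimal sketch of the async `write` wrapper documented above, reusing the hypothetical `Player` record and `dbQueue`:

```swift
import GRDB

func register(_ player: Player, in dbQueue: DatabaseQueue) async throws {
    // Wraps the updates in a transaction, off the current task's thread.
    try await dbQueue.write { db in
        try player.insert(db)
    }
}
```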
- @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func write(_ updates: @Sendable @escaping (Database) throws -> T) async throws -> T { try await withUnsafeThrowingContinuation { continuation in asyncWrite(updates, completion: { _, result in @@ -620,7 +645,7 @@ extension DatabaseWriter { /// /// - parameter updates: A closure which accesses the database. /// - throws: The error thrown by `updates`. - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func writeWithoutTransaction(_ updates: @Sendable @escaping (Database) throws -> T) async throws -> T { try await withUnsafeThrowingContinuation { continuation in asyncWriteWithoutTransaction { db in @@ -672,7 +697,7 @@ extension DatabaseWriter { /// /// - parameter updates: A closure which accesses the database. /// - throws: The error thrown by `updates`. - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func barrierWriteWithoutTransaction( _ updates: @Sendable @escaping (Database) throws -> T) async throws -> T @@ -684,10 +709,10 @@ extension DatabaseWriter { } } - /// Erases the content of the database. + /// Erase the database: delete all content, drop all tables, etc. /// /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func erase() async throws { try await writeWithoutTransaction { try $0.erase() } } @@ -698,7 +723,7 @@ extension DatabaseWriter { /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) /// /// Related SQLite documentation: - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func vacuum() async throws { try await writeWithoutTransaction { try $0.execute(sql: "VACUUM") } } @@ -715,7 +740,7 @@ extension DatabaseWriter { /// Related SQLite documentation: /// /// - Parameter filePath: file path for new database - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func vacuum(into filePath: String) async throws { try await writeWithoutTransaction { try $0.execute(sql: "VACUUM INTO ?", arguments: [filePath]) @@ -730,7 +755,7 @@ extension DatabaseWriter { /// Related SQLite documentation: /// /// - Parameter filePath: file path for new database - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func vacuum(into filePath: String) async throws { try await writeWithoutTransaction { try $0.execute(sql: "VACUUM INTO ?", arguments: [filePath]) @@ -775,7 +800,7 @@ extension DatabaseWriter { /// /// - parameter scheduler: A Combine Scheduler. /// - parameter updates: A closure which accesses the database. - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func writePublisher( receiveOn scheduler: some Combine.Scheduler = DispatchQueue.main, updates: @escaping (Database) throws -> Output) @@ -840,7 +865,7 @@ extension DatabaseWriter { /// - parameter scheduler: A Combine Scheduler. /// - parameter updates: A closure which writes in the database. 
/// - parameter value: A closure which reads from the database. - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func writePublisher( receiveOn scheduler: S = DispatchQueue.main, updates: @escaping (Database) throws -> T, @@ -872,7 +897,7 @@ extension DatabaseWriter { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension DatabasePublishers { /// A publisher that writes into the database. /// @@ -891,7 +916,7 @@ extension DatabasePublishers { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Publisher where Failure == Error { fileprivate func eraseToWritePublisher() -> DatabasePublishers.Write { .init(upstream: self.eraseToAnyPublisher()) @@ -961,6 +986,10 @@ extension AnyDatabaseWriter: DatabaseReader { base.configuration } + public var path: String { + base.path + } + public func close() throws { try base.close() } diff --git a/GRDB/Core/FetchRequest.swift b/GRDB/Core/FetchRequest.swift index a1848aca80..43115cab70 100644 --- a/GRDB/Core/FetchRequest.swift +++ b/GRDB/Core/FetchRequest.swift @@ -58,10 +58,6 @@ /// - ``makePreparedRequest(_:forSingleResult:)`` /// - ``PreparedRequest`` /// -/// ### Database Observation Support -/// -/// - ``databaseRegion(_:)`` -/// /// ### Adapting the Fetched Rows /// /// - ``adapted(_:)`` @@ -111,6 +107,28 @@ extension FetchRequest { // MARK: - PreparedRequest +/// A closure executed before a supplementary fetch is performed. +/// +/// Support for `Database.dumpRequest`. +/// +/// - parameter request: The supplementary request +/// - parameter keyPath: The key path target of the supplementary fetch. +typealias WillExecuteSupplementaryRequest = (_ request: AnyFetchRequest, _ keyPath: [String]) throws -> Void + +/// A closure that performs supplementary fetches. +/// +/// Support for eager loading of hasMany associations. +/// +/// - parameter db: A database connection. +/// - parameter rows: The rows that are modified by the supplementary fetch. +/// - parameter willExecuteSupplementaryRequest: A closure to execute before +/// performing supplementary fetches. +typealias SupplementaryFetch = ( + _ db: Database, + _ rows: [Row], + _ willExecuteSupplementaryRequest: WillExecuteSupplementaryRequest?) +throws -> Void + /// A `PreparedRequest` is a request that is ready to be executed. public struct PreparedRequest { /// A prepared statement with bound parameters. @@ -119,13 +137,14 @@ public struct PreparedRequest { /// An eventual adapter for rows fetched by the select statement. public var adapter: (any RowAdapter)? + /// A closure that performs supplementary fetches. /// Support for eager loading of hasMany associations. - var supplementaryFetch: ((Database, [Row]) throws -> Void)? + var supplementaryFetch: SupplementaryFetch? init( statement: Statement, adapter: (any RowAdapter)?, - supplementaryFetch: ((Database, [Row]) throws -> Void)? = nil) + supplementaryFetch: SupplementaryFetch? = nil) { self.statement = statement self.adapter = adapter @@ -133,6 +152,11 @@ public struct PreparedRequest { } } +// Explicit non-conformance to Sendable: `PreparedRequest` contains +// a statement. +@available(*, unavailable) +extension PreparedRequest: Sendable { } + extension PreparedRequest: Refinable { } // MARK: - AdaptedFetchRequest @@ -141,7 +165,9 @@ extension FetchRequest { /// Returns an adapted request. 
 ///
     /// The returned request performs an identical database query, but adapts
-    /// the fetched rows. See ``RowAdapter``.
+    /// the fetched rows. See ``RowAdapter``, and
+    /// ``splittingRowAdapters(columnCounts:)`` for sample code that uses
+    /// `adapted(_:)`.
     ///
     /// - parameter adapter: A closure that accepts a database connection and
     ///   returns a row adapter.
@@ -215,8 +241,8 @@ public struct AnyFetchRequest {
     ///
     /// // AnyFetchRequest<Row>
     /// let rowRequest = playerRequest.asRequest(of: Row.self)
-    public func asRequest(of type: RowDecoder.Type) -> AnyFetchRequest<RowDecoder> {
-        AnyFetchRequest(request: request)
+    public func asRequest<T>(of type: T.Type) -> AnyFetchRequest<T> {
+        AnyFetchRequest<T>(request: request)
     }
 }
 
diff --git a/GRDB/Core/Row.swift b/GRDB/Core/Row.swift
index b37822c022..e11f35d1ff 100644
--- a/GRDB/Core/Row.swift
+++ b/GRDB/Core/Row.swift
@@ -245,6 +245,12 @@ public final class Row {
     }
 }
 
+// Explicit non-conformance to Sendable: a row contains transient
+// information. TODO GRDB7: split non sendable statement rows from sendable
+// copied rows.
+@available(*, unavailable)
+extension Row: Sendable { }
+
 extension Row {
 
     // MARK: - Columns
 
@@ -682,7 +688,7 @@ extension Row {
     /// ``JoinableRequest/including(optional:)`` request methods define scopes
     /// named after the key of included associations between record types.
     ///
-    /// A depth-first search is performed in all available scopes in the row,
+    /// A breadth-first search is performed in all available scopes in the row,
     /// recursively.
     ///
     /// A fatal error is raised if the scope is not available, or contains only
@@ -726,7 +732,7 @@ extension Row {
     /// ``JoinableRequest/including(optional:)`` request methods define scopes
     /// named after the key of included associations between record types.
     ///
-    /// A depth-first search is performed in all available scopes in the row,
+    /// A breadth-first search is performed in all available scopes in the row,
     /// recursively.
     ///
     /// The result is nil if the scope is not available, or contains only
@@ -879,7 +885,7 @@ extension Row {
     /// A view on the scopes tree defined by row adapters.
     ///
     /// The returned object provides access to all available scopes in
-    /// the row, recursively. For any given scope identifier, a depth-first
+    /// the row, recursively. For any given scope identifier, a breadth-first
     /// search is performed.
     ///
     /// Row scopes can be defined manually, with ``ScopeAdapter``.
@@ -1152,8 +1158,7 @@ extension Row {
     /// Nil is returned if the scope is not available, or contains only
     /// null values.
     ///
-    /// See
-    /// for more information.
+    /// See ``splittingRowAdapters(columnCounts:)`` for sample code.
    func decodeIfPresent<Record: FetchableRecord>(
        _ type: Record.Type = Record.self,
        forKey scope: String)
@@ -1194,8 +1199,7 @@ extension Row {
     /// A fatal error is raised if the scope is not available, or contains only
     /// null values.
     ///
-    /// See
-    /// for more information.
+    /// See ``splittingRowAdapters(columnCounts:)`` for sample code.
    func decode<Record: FetchableRecord>(
        _ type: Record.Type = Record.self,
        forKey scope: String)
@@ -1366,7 +1370,7 @@ public final class RowCursor: DatabaseCursor {
         self._row = try Row(statement: statement).adapted(with: adapter, layout: statement)
 
         // Assume cursor is created for immediate iteration: reset and set arguments
-        try statement.reset(withArguments: arguments)
+        try statement.prepareExecution(withArguments: arguments)
     }
 
     deinit {
@@ -1379,6 +1383,11 @@ public final class RowCursor: DatabaseCursor {
     public func _element(sqliteStatement: SQLiteStatement) -> Row { _row }
 }
 
+// Explicit non-conformance to Sendable: database cursors must be used from
+// a serialized database access dispatch queue.
+@available(*, unavailable)
+extension RowCursor: Sendable { }
+
 extension Row {
 
     // MARK: - Fetching From Prepared Statement
 
@@ -1741,7 +1750,7 @@ extension Row {
     public static func fetchAll(_ db: Database, _ request: some FetchRequest) throws -> [Row] {
         let request = try request.makePreparedRequest(db, forSingleResult: false)
         let rows = try fetchAll(request.statement, adapter: request.adapter)
-        try request.supplementaryFetch?(db, rows)
+        try request.supplementaryFetch?(db, rows, nil)
         return rows
     }
 
@@ -1774,7 +1783,7 @@ extension Row {
         let request = try request.makePreparedRequest(db, forSingleResult: false)
         if let supplementaryFetch = request.supplementaryFetch {
             let rows = try fetchAll(request.statement, adapter: request.adapter)
-            try supplementaryFetch(db, rows)
+            try supplementaryFetch(db, rows, nil)
             return Set(rows)
         } else {
             return try fetchSet(request.statement, adapter: request.adapter)
         }
@@ -1811,7 +1820,7 @@ extension Row {
         guard let row = try fetchOne(request.statement, adapter: request.adapter) else {
             return nil
         }
-        try request.supplementaryFetch?(db, [row])
+        try request.supplementaryFetch?(db, [row], nil)
         return row
     }
 }
@@ -2061,7 +2070,7 @@ typealias RowIndex = Row.Index
 extension Row {
     /// An index to a (column, value) pair in a ``Row``.
-    public struct Index {
+    public struct Index: Sendable {
         let index: Int
         init(_ index: Int) { self.index = index }
     }
diff --git a/GRDB/Core/RowAdapter.swift b/GRDB/Core/RowAdapter.swift
index c3902faab6..a077e58152 100644
--- a/GRDB/Core/RowAdapter.swift
+++ b/GRDB/Core/RowAdapter.swift
@@ -1,27 +1,137 @@
 import Foundation
 
-/// Returns an array of row adapters that split a row according to the input
-/// number of columns.
+/// Returns an array of row adapters that split a row according to the
+/// provided numbers of columns.
 ///
-/// For example:
+/// This method is useful for splitting a row into chunks.
+///
+/// For example, let's consider the following SQL query:
+///
+/// ```swift
+/// let sql = """
+/// SELECT player.*, team.*
+/// FROM player
+/// LEFT JOIN team ON team.id = player.teamId
+/// WHERE player.id = ?
+/// """
+/// ```
+///
+/// The resulting rows contain columns from both the player and team tables:
 ///
 /// ```swift
-/// let sql = "SELECT 1, 2,3,4, 5,6, 7,8"
-/// //        <.><. . .><. .><. .>
-/// let adapters = splittingRowAdapters([1, 3, 2])
+/// // [id: 1, name: "Arthur", teamId: 42, id: 42, name: "Reds"]
+/// // <---------------------------------><-------------------->
+/// //       player columns                    team columns
+/// let row = try Row.fetchOne(db, sql: sql, arguments: [1])
+/// ```
+///
+/// Because some columns have the same name (`id` and `name`), it is
+/// difficult to access the team columns.
+/// +/// `splittingRowAdapters` and ``ScopeAdapter`` make it possible to +/// access player and team columns independently, with row ``Row/scopes``: +/// +/// ```swift +/// let adapters = try splittingRowAdapters([ +/// db.columns(in: "player").count, +/// db.columns(in: "team").count, +/// ]) /// let adapter = ScopeAdapter([ -/// "a": adapters[0], -/// "b": adapters[1], -/// "c": adapters[2], -/// "d": adapters[3]]) -/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter) -/// row.scopes["a"] // [1] -/// row.scopes["b"] // [2, 3, 4] -/// row.scopes["c"] // [5, 6] -/// row.scopes["d"] // [7, 8] +/// "player": adapters[0], +/// "team": adapters[1], +/// ]) +/// if let row = try Row.fetchOne(db, sql: sql, arguments: [1], adapter: adapter) { +/// // A Row that only contains player columns +/// // [id: 1, name: "Arthur", teamId: 42] +/// row.scopes["player"] +/// +/// // A Row that only contains team columns +/// // [id: 42, name: "Reds"] +/// row.scopes["team"] +/// } +/// ``` +/// +/// Decoding ``FetchableRecord`` types is easy: +/// +/// ```swift +/// if let row = try Row.fetchOne(db, sql: sql, arguments: [1], adapter: adapter) { +/// // Player(id: 1, name: "Arthur", teamId: 42) +/// let player: Player = row["player"] +/// +/// // Team(id: 42, name: "Reds") +/// // nil if the LEFT JOIN has fetched NULL team columns +/// if let team: Team? = row["team"] +/// } +/// ``` +/// +/// You can package this technique in a dedicated type, as in the next +/// example. It enhances the previous sample codes with: +/// +/// - Support for record types that customize their fetched columns +/// with ``TableRecord/databaseSelection-7iphs``. +/// - ``SQLRequest`` and its support for [SQL Interpolation](https://github.com/groue/GRDB.swift/blob/master/Documentation/SQLInterpolation.md). +/// - ``FetchRequest/adapted(_:)`` for building a request that embeds the +/// row adapters. +/// +/// ```swift +/// struct Player: TableRecord, FetchableRecord { ... } +/// struct Team: TableRecord, FetchableRecord { ... } +/// +/// struct PlayerInfo { +/// var player: Player +/// var team: Team? +/// } +/// +/// extension PlayerInfo: FetchableRecord { +/// init(row: Row) { +/// player = row["player"] +/// team = row["team"] +/// } +/// } +/// +/// extension PlayerInfo { +/// /// The request for the player info, given a player id +/// static func filter(playerId: Int64) -> some FetchRequest { +/// // Build SQL request with SQL interpolation +/// let request: SQLRequest = """ +/// SELECT +/// \(columnsOf: Player.self), -- Instead of player.* +/// \(columnsOf: Team.self), -- Instead of team.* +/// FROM player +/// LEFT JOIN team ON team.id = player.teamId +/// WHERE player.id = \(playerId) +/// """ +/// +/// // Returns an adapted request that defines the player and team +/// // scopes in the fetched row +/// return request.adapted { db in +/// let adapters = try splittingRowAdapters(columnCounts: [ +/// Player.numberOfSelectedColumns(db), +/// Team.numberOfSelectedColumns(db), +/// ]) +/// return ScopeAdapter([ +/// "player": adapters[0], +/// "team": adapters[1], +/// ]) +/// } +/// } +/// } +/// +/// // Usage +/// try dbQueue.read { db in +/// if let playerInfo = try PlayerInfo.filter(playerId: 1).fetchOne(db) { +/// print(playerInfo.player) // Player(id: 1, name: "Arthur", teamId: 42) +/// print(playerInfo.team) // Team(id: 42, name: "Reds") +/// } +/// } /// ``` +/// +/// - parameter columnCounts: An array of row chunk lengths. 
+/// - returns: An array of row adapters that split a row into as many chunks +/// as the number of elements in `columnCounts`, plus one (the row adapter +/// for all columns that remain on the right of the last chunk). public func splittingRowAdapters(columnCounts: [Int]) -> [any RowAdapter] { - guard !columnCounts.isEmpty else { + if columnCounts.isEmpty { // Identity adapter return [SuffixRowAdapter(fromIndex: 0)] } @@ -149,25 +259,57 @@ extension Statement: _RowLayout { /// A type that helps two incompatible row interfaces working together. /// -/// You provide row adapters to methods that fetch. For example: +/// Row adapters present database rows in the way expected by the +/// row consumers. +/// +/// For example, when a row consumer expects a column named "consumed", but +/// the raw row has a column named "produced", the ``ColumnMapping`` row +/// adapter comes in handy: /// /// ```swift -/// try dbQueue.read { -/// // An adapter that ignores the first two columns -/// let adapter = SuffixRowAdapter(fromIndex: 2) -/// let sql = "SELECT 1 AS foo, 2 AS bar, 3 AS baz" +/// // Feeds the "consumed" column from "produced": +/// let adapter = ColumnMapping(["consumed": "produced"]) +/// let sql = "SELECT 'Hello' AS produced" +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! /// -/// // [baz:3] -/// try Row.fetchOne(db, sql: sql, adapter: adapter) -/// } +/// // [consumed:"Hello"] +/// print(row) +/// +/// // "Hello" +/// print(row["consumed"]) +/// ``` +/// +/// The raw fetched columns are not lost (see ``Row/unadapted``): +/// +/// ```swift +/// // ▿ [consumed:"Hello"] +/// // unadapted: [produced:"Hello"] +/// print(row.debugDescription) +/// +/// // [produced:"Hello"] +/// print(row.unadapted) /// ``` /// +/// There are several situations where row adapters are useful. Among them: +/// +/// - Adapters help disambiguate columns with identical names, which may +/// happen when you select columns from several tables. +/// See ``splittingRowAdapters(columnCounts:)`` for some sample code. +/// +/// - Adapters help when SQLite outputs unexpected column names, which may +/// happen with some subqueries. See ``RenameColumnAdapter`` for +/// an example. +/// /// ## Topics /// -/// ### Splitting a Row +/// ### Splitting a Row into Chunks /// /// - ``splittingRowAdapters(columnCounts:)`` /// +/// ### Adding Scopes to an Adapter +/// +/// - ``addingScopes(_:)`` +/// /// ### Built-in Adapters /// /// - ``ColumnMapping`` @@ -222,9 +364,22 @@ extension RowAdapter { } } -/// EmptyRowAdapter is a row adapter that hides all columns. -public struct EmptyRowAdapter: RowAdapter { - /// Creates an EmptyRowAdapter +/// `EmptyRowAdapter` is a row adapter that hides all columns. +/// +/// For example: +/// +/// ```swift +/// let adapter = EmptyRowAdapter() +/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c" +/// +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// row.isEmpty // true +/// ``` +/// +/// This limit adapter may turn out useful in some narrow use cases. You'll +/// be happy to find it when you need it. +public struct EmptyRowAdapter: RowAdapter, Sendable { + /// Creates an `EmptyRowAdapter`. public init() { } public func _layoutedAdapter(from layout: some _RowLayout) throws -> any _LayoutedRowAdapter { @@ -232,19 +387,28 @@ public struct EmptyRowAdapter: RowAdapter { } } -/// ColumnMapping is a row adapter that maps column names. +/// `ColumnMapping` is a row adapter that maps column names. 
+///
+/// Build a `ColumnMapping` with a dictionary whose keys are the adapted
+/// column names, and whose values are the column names in the base row:
+///
+/// ```swift
+/// // Feeds "newA" from "a", and "newB" from "b":
+/// let adapter = ColumnMapping(["newA": "a", "newB": "b"])
+/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c"
 ///
-///     let adapter = ColumnMapping(["foo": "bar"])
-///     let sql = "SELECT 'foo' AS foo, 'bar' AS bar, 'baz' AS baz"
+/// // [newA:0, newB:1]
+/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)!
+/// ```
 ///
-///     // [foo:"bar"]
-///     try Row.fetchOne(db, sql: sql, adapter: adapter)
-public struct ColumnMapping: RowAdapter {
+/// Note that columns that are not present in the dictionary are not present
+/// in the resulting adapted row.
+public struct ColumnMapping: RowAdapter, Sendable {
     /// A dictionary from mapped column names to column names in a base row.
     let mapping: [String: String]
 
-    /// Creates a ColumnMapping with a dictionary from mapped column names to
-    /// column names in a base row.
+    /// Creates a `ColumnMapping` with a dictionary from mapped column names
+    /// to column names in a base row.
     public init(_ mapping: [String: String]) {
         self.mapping = mapping
     }
@@ -269,14 +433,18 @@ public struct ColumnMapping: RowAdapter {
     }
 }
 
-/// SuffixRowAdapter is a row adapter that hides the first columns in a row.
+/// `SuffixRowAdapter` hides the leftmost columns in a row.
+///
+/// For example:
 ///
-///     let adapter = SuffixRowAdapter(fromIndex: 2)
-///     let sql = "SELECT 1 AS foo, 2 AS bar, 3 AS baz"
+/// ```swift
+/// let adapter = SuffixRowAdapter(fromIndex: 2)
+/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d"
 ///
-///     // [baz:3]
-///     try Row.fetchOne(db, sql: sql, adapter: adapter)
-public struct SuffixRowAdapter: RowAdapter {
+/// // [c:2, d:3]
+/// try Row.fetchOne(db, sql: sql, adapter: adapter)!
+/// ```
+public struct SuffixRowAdapter: RowAdapter, Sendable {
     /// The suffix index
     let index: Int
 
@@ -294,14 +462,18 @@ public struct SuffixRowAdapter: RowAdapter {
     }
 }
 
-/// RangeRowAdapter is a row adapter that only exposes a range of columns.
+/// `RangeRowAdapter` is a row adapter that only exposes a range of columns.
+///
+/// For example:
 ///
-///     let adapter = RangeRowAdapter(1..<3)
-///     let sql = "SELECT 1 AS foo, 2 AS bar, 3 AS baz, 4 as qux"
+/// ```swift
+/// let adapter = RangeRowAdapter(1..<3)
+/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d"
 ///
-///     // [bar:2 baz:3]
-///     try Row.fetchOne(db, sql: sql, adapter: adapter)
-public struct RangeRowAdapter: RowAdapter {
+/// // [b:1 c:2]
+/// try Row.fetchOne(db, sql: sql, adapter: adapter)
+/// ```
+public struct RangeRowAdapter: RowAdapter, Sendable {
     /// The range
     let range: CountableRange<Int>
 
@@ -322,28 +494,65 @@ public struct RangeRowAdapter: RowAdapter {
     }
 }
 
-/// `ScopeAdapter` is a row adapter that lets you define scopes on rows.
+/// `ScopeAdapter` is a row adapter that defines row scopes.
+///
+/// `ScopeAdapter` does not change the columns and values of the fetched
+/// row. Instead, it defines *scopes* based on other adapters, which you
+/// access through the ``Row/scopes`` property of the fetched rows.
+/// +/// For example: /// -/// // Two adapters -/// let fooAdapter = ColumnMapping(["value": "foo"]) -/// let barAdapter = ColumnMapping(["value": "bar"]) +/// ```swift +/// let adapter = ScopeAdapter([ +/// "left": RangeRowAdapter(0..<2), +/// "right": RangeRowAdapter(2..<4)]) +/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d" +/// +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! /// -/// // Define scopes -/// let adapter = ScopeAdapter([ -/// "foo": fooAdapter, -/// "bar": barAdapter]) +/// row // [a:0 b:1 c:2 d:3] +/// row.scopes["left"] // [a:0 b:1] +/// row.scopes["right"] // [c:2 d:3] +/// row.scopes["missing"] // nil +/// ``` /// -/// // Fetch -/// let sql = "SELECT 'foo' AS foo, 'bar' AS bar" -/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// Scopes can be nested: /// -/// // Scoped rows: -/// if let fooRow = row.scopes["foo"] { -/// fooRow["value"] // "foo" -/// } -/// if let barRow = row.scopes["bar"] { -/// barRow["value"] // "bar" -/// } +/// ```swift +/// let adapter = ScopeAdapter([ +/// "left": ScopeAdapter([ +/// "left": RangeRowAdapter(0..<1), +/// "right": RangeRowAdapter(1..<2)]), +/// "right": ScopeAdapter([ +/// "left": RangeRowAdapter(2..<3), +/// "right": RangeRowAdapter(3..<4)]) +/// ]) +/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d" +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// +/// let leftRow = row.scopes["left"]! +/// leftRow.scopes["left"] // [a:0] +/// leftRow.scopes["right"] // [b:1] +/// +/// let rightRow = row.scopes["right"]! +/// rightRow.scopes["left"] // [c:2] +/// rightRow.scopes["right"] // [d:3] +/// ``` +/// +/// Any adapter can be extended with scopes, with +/// ``RowAdapter/addingScopes(_:)``: +/// +/// ```swift +/// let baseAdapter = RangeRowAdapter(0..<2) +/// let adapter = baseAdapter.addingScopes([ +/// "remainder": SuffixRowAdapter(fromIndex: 2) +/// ]) +/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d" +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// +/// row // [a:0 b:1] +/// row.scopes["remainder"] // [c:2 d:3] +/// ``` public struct ScopeAdapter: RowAdapter { /// The base adapter @@ -363,7 +572,7 @@ public struct ScopeAdapter: RowAdapter { /// /// - parameter scopes: A dictionary that maps scope names to /// row adapters. - public init(_ scopes: [String: RowAdapter]) { + public init(_ scopes: [String: any RowAdapter]) { // Use SuffixRowAdapter(fromIndex: 0) as the identity adapter self.init(base: SuffixRowAdapter(fromIndex: 0), scopes: scopes) } @@ -423,11 +632,29 @@ struct ChainedAdapter: RowAdapter { /// /// For example: /// -/// let adapter = RenameColumnAdapter { $0 + "rrr" } -/// let sql = "SELECT 'foo' AS foo, 'bar' AS bar, 'baz' AS baz" +/// ```swift +/// let adapter = RenameColumnAdapter { column in column + "rrr" } +/// let sql = "SELECT 0 AS a, 1 AS b, 2 AS c" +/// +/// // [arrr:0, brrr:1, crrr:2] +/// let row = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// ``` +/// +/// This adapter is useful when subqueries contain duplicated column names: +/// +/// ```swift +/// let sql = "SELECT * FROM (SELECT 1 AS id, 2 AS id)" +/// +/// // Prints ["id", "id:1"] +/// // Note the "id:1" column, generated by SQLite. +/// let row = try Row.fetchOne(db, sql: sql)! 
+/// print(Array(row.columnNames)) /// -/// // [foorrr:"foo", barrrr:"bar", bazrrr:"baz"] -/// try Row.fetchOne(db, sql: sql, adapter: adapter) +/// // Drop the `:...` suffix, and prints ["id", "id"] +/// let adapter = RenameColumnAdapter { String($0.prefix(while: { $0 != ":" })) } +/// let adaptedRow = try Row.fetchOne(db, sql: sql, adapter: adapter)! +/// print(Array(adaptedRow.columnNames)) +/// ``` public struct RenameColumnAdapter: RowAdapter { let transform: (String) -> String diff --git a/GRDB/Core/SQL.swift b/GRDB/Core/SQL.swift index 644251c811..4246cf202e 100644 --- a/GRDB/Core/SQL.swift +++ b/GRDB/Core/SQL.swift @@ -121,8 +121,8 @@ public struct SQL { fileprivate func qualified(with alias: TableAlias) -> Element { switch self { case .sql: - /// A raw SQL string can't be qualified with a table alias, - /// because we can't parse it. + // A raw SQL string can't be qualified with a table alias, + // because we can't parse it. return self case .subquery: // Subqueries don't need table alias diff --git a/GRDB/Core/SQLRequest.swift b/GRDB/Core/SQLRequest.swift index 8a0bd2043c..6ec272c57f 100644 --- a/GRDB/Core/SQLRequest.swift +++ b/GRDB/Core/SQLRequest.swift @@ -206,9 +206,9 @@ extension SQLRequest: FetchRequest { switch cache { case .none: statement = try db.makeStatement(sql: sql) - case .public?: + case .public: statement = try db.cachedStatement(sql: sql) - case .internal?: + case .internal: statement = try db.internalCachedStatement(sql: sql) } try statement.setArguments(context.arguments) diff --git a/GRDB/Core/SerializedDatabase.swift b/GRDB/Core/SerializedDatabase.swift index bcec3e9f75..f1dfe05721 100644 --- a/GRDB/Core/SerializedDatabase.swift +++ b/GRDB/Core/SerializedDatabase.swift @@ -14,6 +14,9 @@ final class SerializedDatabase { /// The dispatch queue private let queue: DispatchQueue + /// If true, overrides `configuration.allowsUnsafeTransactions`. + private var allowsUnsafeTransactions = false + init( path: String, configuration: Configuration = Configuration(), @@ -76,10 +79,25 @@ final class SerializedDatabase { } } - /// Synchronously executes a block the serialized dispatch queue, and - /// returns its result. + /// Executes database operations, returns their result after they have + /// finished executing, and allows or forbids long-lived transactions. + /// + /// This method is not reentrant. + /// + /// - parameter allowingLongLivedTransaction: When true, the + /// ``Configuration/allowsUnsafeTransactions`` configuration flag is + /// ignored until this method is called again with false. + func sync(allowingLongLivedTransaction: Bool, _ body: (Database) throws -> T) rethrows -> T { + try sync { db in + self.allowsUnsafeTransactions = allowingLongLivedTransaction + return try body(db) + } + } + + /// Executes database operations, and returns their result after they + /// have finished executing. /// - /// This method is *not* reentrant. + /// This method is not reentrant. func sync(_ block: (Database) throws -> T) rethrows -> T { // Three different cases: // @@ -122,8 +140,23 @@ final class SerializedDatabase { } } - /// Synchronously executes a block the serialized dispatch queue, and - /// returns its result. + /// Executes database operations, returns their result after they have + /// finished executing, and allows or forbids long-lived transactions. + /// + /// This method is reentrant. 
+ /// + /// - parameter allowingLongLivedTransaction: When true, the + /// ``Configuration/allowsUnsafeTransactions`` configuration flag is + /// ignored until this method is called again with false. + func reentrantSync(allowingLongLivedTransaction: Bool, _ body: (Database) throws -> T) rethrows -> T { + try reentrantSync { db in + self.allowsUnsafeTransactions = allowingLongLivedTransaction + return try body(db) + } + } + + /// Executes database operations, and returns their result after they + /// have finished executing. /// /// This method is reentrant. func reentrantSync(_ block: (Database) throws -> T) rethrows -> T { @@ -189,7 +222,7 @@ final class SerializedDatabase { } } - /// Asynchronously executes a block in the serialized dispatch queue. + /// Schedules database operations for execution, and returns immediately. func async(_ block: @escaping (Database) -> Void) { queue.async { block(self.db) @@ -242,7 +275,7 @@ final class SerializedDatabase { line: UInt = #line) { GRDBPrecondition( - configuration.allowsUnsafeTransactions || !db.isInsideTransaction, + allowsUnsafeTransactions || configuration.allowsUnsafeTransactions || !db.isInsideTransaction, message(), file: file, line: line) diff --git a/GRDB/Core/Statement.swift b/GRDB/Core/Statement.swift index 39e98827f2..e1a90a1082 100644 --- a/GRDB/Core/Statement.swift +++ b/GRDB/Core/Statement.swift @@ -25,59 +25,6 @@ extension String { } } -/// A prepared statement. -/// -/// You create prepared statements from a ``Database`` instance. For example: -/// -/// ```swift -/// try dbQueue.write { db in -/// let statement = try db.makeStatement(sql: """ -/// DELETE FROM player WHERE id = ? -/// """) -/// try statement.execute(arguments: [1])! -/// try statement.execute(arguments: [12])! -/// } -/// ``` -/// -/// To fetch rows and values from a prepared statement, use a fetching method of -///``Row``, ``DatabaseValueConvertible``, or ``FetchableRecord``: -/// -/// ```swift -/// try dbQueue.read { db in -/// let statement = try db.makeStatement(sql: """ -/// SELECT name FROM player WHERE id = ? -/// """) -/// let name1 = try String.fetchOne(statement, arguments: [1])! -/// let name2 = try String.fetchOne(statement, arguments: [12])! -/// } -/// ``` -/// -/// Related SQLite documentation: -/// -/// ## Topics -/// -/// ### Executing a Prepared Statement -/// -/// - ``execute(arguments:)`` -/// -/// ### Arguments -/// -/// - ``arguments`` -/// - ``setArguments(_:)`` -/// - ``setUncheckedArguments(_:)`` -/// - ``validateArguments(_:)`` -/// - ``StatementArguments`` -/// -/// ### Statement Informations -/// -/// - ``columnCount`` -/// - ``columnNames`` -/// - ``databaseRegion`` -/// - ``index(ofColumn:)`` -/// - ``isReadonly`` -/// - ``sql`` -/// - ``sqliteStatement`` -/// - ``SQLiteStatement`` public final class Statement { enum TransactionEffect { case beginTransaction @@ -95,20 +42,35 @@ public final class Statement { public var sql: String { SchedulingWatchdog.preconditionValidQueue(database) - // trim white space and semicolumn for homogeneous output + // trim white space and semicolon for homogeneous output return String(cString: sqlite3_sql(sqliteStatement)).trimmedSQLStatement } /// The column names, ordered from left to right. 
public lazy var columnNames: [String] = { + // swiftlint:disable:next redundant_self_in_closure let sqliteStatement = self.sqliteStatement - return (0.., - statementEnd: UnsafeMutablePointer?>, + statementStart: UnsafePointer, + statementEnd: UnsafeMutablePointer?>, prepFlags: CUnsignedInt) throws { SchedulingWatchdog.preconditionValidQueue(database) @@ -175,7 +137,7 @@ public final class Statement { database.sqliteConnection, statementStart, -1, prepFlags, &sqliteStatement, statementEnd) #else - if #available(iOS 12.0, OSX 10.14, tvOS 12.0, watchOS 5.0, *) { + if #available(iOS 12, macOS 10.14, tvOS 12, watchOS 5, *) { // SQLite 3.20+ code = sqlite3_prepare_v3( database.sqliteConnection, statementStart, -1, prepFlags, &sqliteStatement, statementEnd) @@ -209,25 +171,100 @@ public final class Statement { // MARK: Arguments - private var argumentsNeedValidation = true - + /// Whether arguments are valid and bound inside the SQLite statement. + /// + /// If true, arguments are considered valid, and they are bound in + /// the SQLite statement: + /// + /// - **Valid**: Arguments match the statement expectations, or the user has + /// called ``setUncheckedArguments(_:)``. + /// - **Bound**: The SQLite bindings are set. String and blob arguments + /// are bound with SQLITE_TRANSIENT (copied and managed by SQLite). + /// + /// When false, arguments have not been validated yet, or they are + /// not bound. + /// + /// - Not validated yet: this is the initial default (non-validated + /// empty arguments) + /// + /// ```swift + /// // Default arguments are empty, argumentsAreValidAndBound is + /// // false. The statement needs one argument. + /// let statement = try db.makeStatement(sql: """ + /// INSERT INTO t VALUES (?) + /// """) + /// + /// // Because argumentsAreValidAndBound is false, we validate the + /// // empty arguments, and throw SQLITE_MISUSE: wrong number + /// // of statement arguments. + /// try statement.execute() + /// ``` + /// + /// - Not bound: this is the case after we have performed an optimized + /// execution with temporary bindings that avoid copying strings + /// and blobs: + /// + /// ```swift + /// let statement = try db.makeStatement(sql: """ + /// INSERT INTO t VALUES (?) + /// """) + /// // Arguments are set, and execution is performed with + /// // temporary bindings. + /// try statement.execute(arguments: ["Hello"]) + /// // <- Here statement.arguments is ["Hello"] + /// // <- Here statement.argumentsAreValidAndBound is false + /// ``` + /// + /// See `withArguments(_:do:)`. + private var argumentsAreValidAndBound = false + + /// The statement arguments. They may be bound, or not, in the SQLite + /// statement. See `argumentsAreValidAndBound`. private var _arguments = StatementArguments() lazy var sqliteArgumentCount: Int = { - Int(sqlite3_bind_parameter_count(self.sqliteStatement)) + Int(sqlite3_bind_parameter_count(sqliteStatement)) }() - // Returns ["id", nil", "name"] for "INSERT INTO table VALUES (:id, ?, :name)" + // Returns ["id", nil, "name"] for "INSERT INTO table VALUES (:id, ?, :name)" fileprivate lazy var sqliteArgumentNames: [String?] 
= {
-        (0..
+    func withArguments<T>(_ arguments: StatementArguments, do body: () throws -> T) throws -> T {
+        // Validate
+        var consumedArguments = arguments
+        let bindings = try consumedArguments.extractBindings(forStatement: self, allowingRemainingValues: false)
+
+        // Reset and bind arguments (temporarily)
+        try reset()
+        _arguments = arguments
+        argumentsAreValidAndBound = false
+        clearBindings()
+
+        defer {
+            // Don't leave the SQLite statement in an invalid state
+            // (temporary bindings that point to undefined memory).
+            clearBindings()
+        }
+
+        return try withBindings(bindings, to: sqliteStatement, do: body)
+    }
+
     // 1-based index
     func bind(_ value: some StatementBinding, at index: CInt) {
         let code = value.bind(to: sqliteStatement, at: index)
@@ -369,14 +440,44 @@ public final class Statement {
         }
     }
 
-    func reset(withArguments arguments: StatementArguments?) throws {
-        // Force arguments validity: it is a programmer error to provide
-        // arguments that do not match the statement.
-        if let arguments = arguments {
-            try setArguments(arguments)
-        } else if argumentsNeedValidation {
+    /// Convenience method that resets, sets arguments if needed, and checks
+    /// arguments validity.
+    ///
+    /// - parameter newArguments: If not nil, this method sets arguments.
+    func prepareExecution(withArguments newArguments: StatementArguments? = nil) throws {
+        if let newArguments {
+            try setArguments(newArguments) // calls reset()
+            return
+        }
+
+        if argumentsAreValidAndBound {
             try reset()
-            try validateArguments(self.arguments)
+        } else {
+            // Arguments need to be validated, or bound.
+            if arguments.isEmpty {
+                // Only reset and perform validation.
+                try reset()
+                try validateArguments(arguments)
+            } else {
+                // The `setArguments` method binds and validates, and that's
+                // exactly what we want to do.
+                //
+                // To get there, perform statement.execute() after
+                // statement.execute(arguments:):
+                //
+                //     // Step 1
+                //     // Optimized execution with temporary bindings in order
+                //     // to avoid copying strings and blobs: after execution,
+                //     // arguments are set, but bindings have been cleared,
+                //     // and argumentsAreValidAndBound is false.
+                //     try statement.execute(arguments: StatementArguments(person)!)
+                //
+                //     // Step 2 (we are here). Stop using temporary
+                //     // bindings because the user explicitly opts in to
+                //     // permanent ones.
+                //     try statement.execute()
+                try setArguments(arguments) // calls reset()
+            }
         }
     }
 
@@ -385,35 +486,83 @@ public final class Statement {
     /// For example:
     ///
     /// ```swift
-    /// let dbQueue = try DatabaseQueue()
     /// try dbQueue.write { db in
     ///     // Statement without argument
-    ///     let createTableStatement = try db.makeStatement(sql: """
+    ///     let statement = try db.makeStatement(sql: """
    ///         CREATE TABLE player (
    ///             id INTEGER PRIMARY KEY AUTOINCREMENT,
    ///             name TEXT NOT NULL
    ///         )
    ///         """)
-    ///     try createTableStatement.execute()
+    ///     try statement.execute()
+    /// }
     ///
+    /// try dbQueue.write { db in
     ///     // Statement with argument
-    ///     let insertStatement = try db.makeStatement(sql: """
+    ///     let statement = try db.makeStatement(sql: """
    ///         INSERT INTO player (name) VALUES (?)
/// """) /// /// // Set argument and execute - /// try insertStatement.setArguments(["Arthur"]) - /// try insertStatement.execute() + /// try statement.setArguments(["Arthur"]) + /// try statement.execute() /// /// // Set argument and execute in one shot - /// try insertStatement.execute(arguments: ["Barbara"]) + /// try statement.execute(arguments: ["Barbara"]) /// } /// ``` /// + /// When arguments are set at the moment of execution, with an non-nil + /// `arguments` parameter, it is assumed that the statement won't be + /// reused with the same arguments. When the number of arguments is + /// small, execution is performed with temporary SQLite bindings that + /// avoid copying strings and blobs arguments. + /// + /// For more information, see [`SQLITE_STATIC` and `SQLITE_TRANSIENT`](https://www.sqlite.org/c3ref/c_static.html). + /// Compare: + /// + /// ```swift + /// // Uses SQLITE_STATIC if there are few arguments, + /// // SQLITE_TRANSIENT otherwise. + /// try statement.execute(arguments: ["Barbara"]) + /// + /// // Always uses SQLITE_TRANSIENT + /// try statement.setArguments(["Arthur"]) + /// try statement.execute() + /// ``` + /// + /// Both techniques have the same results, but when you care about + /// performances, monitor your application in order to make the + /// best choice. + /// /// - parameter arguments: Optional statement arguments. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. public func execute(arguments: StatementArguments? = nil) throws { - try reset(withArguments: arguments) + if let arguments { + // Assume that the statement won't be reused with the same arguments. + // + // Avoid a stack overflow, and don't perform an unbounded nesting + // of `withBinding(to:at:do:)` methods: only use temporary bindings + // for less than 20 arguments. This number 20 is completely + // arbitrary! + // See + if sqliteArgumentCount <= 20 { + // Perform an optimized execution with temporary bindings + // in order to avoid copying strings and blobs. + try withArguments(arguments) { + try executeAllSteps() + } + } else { + try setArguments(arguments) + try executeAllSteps() + } + } else { + try prepareExecution() + try executeAllSteps() + } + } + + private func executeAllSteps() throws { try database.statementWillExecute(self) // Iterate all rows, since they may execute side effects. @@ -474,6 +623,11 @@ public final class Statement { } } +// Explicit non-conformance to Sendable: statements must be used from +// a serialized database access dispatch queue. +@available(*, unavailable) +extension Statement: Sendable { } + extension Statement: CustomStringConvertible { public var description: String { SchedulingWatchdog.allows(database) ? sql : "Statement" @@ -634,7 +788,7 @@ final class StatementCursor: DatabaseCursor { self._statement = statement // Assume cursor is created for immediate iteration: reset and set arguments - try statement.reset(withArguments: arguments) + try statement.prepareExecution(withArguments: arguments) } deinit { @@ -679,11 +833,50 @@ public protocol StatementBinding { /// Binds a statement argument. /// /// - parameter sqliteStatement: An SQLite statement. - /// - parameter index: 1-based index to statement arguments + /// - parameter index: 1-based index to statement arguments. /// - returns: the code returned by the `sqlite3_bind_xxx` function. func bind(to sqliteStatement: SQLiteStatement, at index: CInt) -> CInt } +/// Helper function for `withBinding(to:at:do:)` methods. 
+func checkBindingSuccess(code: CInt, sqliteStatement: SQLiteStatement) throws { + if code == SQLITE_OK { return } + let message = String(cString: sqlite3_errmsg(sqlite3_db_handle(sqliteStatement))) + let sql = String(cString: sqlite3_sql(sqliteStatement)).trimmedSQLStatement + throw DatabaseError(resultCode: code, message: message, sql: sql) +} + +/// Calls the given closure after performing temporary bindings that avoid +/// copying strings and blobs. +/// +/// The bindings are valid only during the execution of this method. +/// +/// - parameter bindings: The bindings. +/// - parameter sqliteStatement: The SQLite statement. +/// - parameter index: The index of the first binding. +/// - parameter body: The closure to execute when arguments are bound. +@usableFromInline +func withBindings<C, T>( + _ bindings: C, + to sqliteStatement: SQLiteStatement, + from index: CInt = 1, + do body: () throws -> T) +throws -> T +where C: Collection, C.Element == DatabaseValue +{ + guard let binding = bindings.first else { + return try body() + } + + return try binding.withBinding(to: sqliteStatement, at: index) { + try withBindings( + bindings.dropFirst(), + to: sqliteStatement, + from: index + 1, + do: body) + } +} + // MARK: - StatementArguments /// An instance of `StatementArguments` provides the values for argument @@ -699,9 +892,9 @@ public protocol StatementBinding { /// - `@AAAA` (e.g. `@name`): named argument /// - `$AAAA` (e.g. `$name`): named argument /// -/// GRDB does not allow to distinguish between the `:AAAA`, `@AAAA`, and `$AAAA` -/// syntaxes. You are encouraged to write named arguments with a -/// colon prefix: `:name`. +/// All forms are supported, but GRDB does not allow distinguishing between +/// the `:AAAA`, `@AAAA`, and `$AAAA` syntaxes. You are encouraged to write +/// named arguments with a colon prefix: `:name`. /// /// ## Positional Arguments /// @@ -843,7 +1036,7 @@ public struct StatementArguments: Hashable { /// The result is nil unless all array elements conform to the /// ``DatabaseValueConvertible`` protocol.
public init?(_ array: [Any]) { - var values = [DatabaseValueConvertible?]() + var values = [(any DatabaseValueConvertible)?]() for value in array { guard let dbValue = DatabaseValue(value: value) else { return nil } @@ -1120,36 +1313,42 @@ public struct StatementArguments: Hashable { allowingRemainingValues: Bool) throws -> [DatabaseValue] { - let initialValuesCount = values.count + var iterator = values.makeIterator() + var consumedValuesCount = 0 let bindings = try statement.sqliteArgumentNames.map { argumentName -> DatabaseValue in - if let argumentName = argumentName { + if let argumentName { if let dbValue = namedValues[argumentName] { return dbValue - } else if values.isEmpty { + } else if let value = iterator.next() { + consumedValuesCount += 1 + return value + } else { throw DatabaseError( resultCode: .SQLITE_MISUSE, message: "missing statement argument: \(argumentName)", sql: statement.sql) - } else { - return values.removeFirst() } + } else if let value = iterator.next() { + consumedValuesCount += 1 + return value } else { - if values.isEmpty { - throw DatabaseError( - resultCode: .SQLITE_MISUSE, - message: "wrong number of statement arguments: \(initialValuesCount)", - sql: statement.sql) - } else { - return values.removeFirst() - } + throw DatabaseError( + resultCode: .SQLITE_MISUSE, + message: "wrong number of statement arguments: \(values.count)", + sql: statement.sql) } } - if !allowingRemainingValues && !values.isEmpty { + if !allowingRemainingValues && iterator.next() != nil { throw DatabaseError( resultCode: .SQLITE_MISUSE, - message: "wrong number of statement arguments: \(initialValuesCount)", + message: "wrong number of statement arguments: \(values.count)", sql: statement.sql) } + if consumedValuesCount == values.count { + values.removeAll() + } else { + values = Array(values[consumedValuesCount...]) + } return bindings } } diff --git a/GRDB/Core/StatementAuthorizer.swift b/GRDB/Core/StatementAuthorizer.swift index e77d823eba..e20883049f 100644 --- a/GRDB/Core/StatementAuthorizer.swift +++ b/GRDB/Core/StatementAuthorizer.swift @@ -57,10 +57,10 @@ final class StatementAuthorizer { private func authorize( _ actionCode: CInt, - _ cString1: UnsafePointer<Int8>?, - _ cString2: UnsafePointer<Int8>?, - _ cString3: UnsafePointer<Int8>?, - _ cString4: UnsafePointer<Int8>?) + _ cString1: UnsafePointer<CChar>?, + _ cString2: UnsafePointer<CChar>?, + _ cString3: UnsafePointer<CChar>?, + _ cString4: UnsafePointer<CChar>?)
-> CInt { // Uncomment when debugging @@ -107,7 +107,7 @@ final class StatementAuthorizer { case SQLITE_DELETE: if isDropStatement { return SQLITE_OK } - guard let cString1 = cString1 else { return SQLITE_OK } + guard let cString1 else { return SQLITE_OK } // Deletions from sqlite_master and sqlite_temp_master are not like // other deletions: `sqlite3_update_hook` does not notify them, and @@ -137,7 +137,7 @@ final class StatementAuthorizer { return SQLITE_OK case SQLITE_TRANSACTION: - guard let cString1 = cString1 else { return SQLITE_OK } + guard let cString1 else { return SQLITE_OK } if strcmp(cString1, "BEGIN") == 0 { transactionEffect = .beginTransaction } else if strcmp(cString1, "COMMIT") == 0 { @@ -148,7 +148,7 @@ final class StatementAuthorizer { return SQLITE_OK case SQLITE_SAVEPOINT: - guard let cString1 = cString1 else { return SQLITE_OK } + guard let cString1 else { return SQLITE_OK } guard let name = cString2.map(String.init) else { return SQLITE_OK } if strcmp(cString1, "BEGIN") == 0 { transactionEffect = .beginSavepoint(name) diff --git a/GRDB/Core/StatementColumnConvertible.swift b/GRDB/Core/StatementColumnConvertible.swift index 27995317e6..cb081ec63d 100644 --- a/GRDB/Core/StatementColumnConvertible.swift +++ b/GRDB/Core/StatementColumnConvertible.swift @@ -12,7 +12,7 @@ /// // Optimized /// let scores = Int.fetchAll(db, sql: "SELECT score FROM player") /// -/// let rows = Row.fetchCursor(db, sql: "SELECT * FROM player") +/// let rows = try Row.fetchCursor(db, sql: "SELECT * FROM player") /// while let row = try rows.next() { /// // Optimized /// let int: Int = row[0] @@ -89,7 +89,7 @@ public protocol StatementColumnConvertible { /// /// Do not check for `NULL` in your implementation of this method. Null /// database values are handled - /// in ``fromStatement(_:atUncheckedIndex:)-2i8y6``. + /// in ``StatementColumnConvertible/fromStatement(_:atUncheckedIndex:)-2i8y6``. /// /// For example, here is the how Int64 adopts StatementColumnConvertible: /// @@ -215,7 +215,7 @@ where Value: DatabaseValueConvertible & StatementColumnConvertible init(statement: Statement, arguments: StatementArguments? = nil, adapter: (any RowAdapter)? = nil) throws { self._statement = statement - if let adapter = adapter { + if let adapter { // adapter may redefine the index of the leftmost column columnIndex = try CInt(adapter.baseColumnIndex(atIndex: 0, layout: statement)) } else { @@ -223,7 +223,7 @@ where Value: DatabaseValueConvertible & StatementColumnConvertible } // Assume cursor is created for immediate iteration: reset and set arguments - try statement.reset(withArguments: arguments) + try statement.prepareExecution(withArguments: arguments) } deinit { @@ -241,6 +241,11 @@ where Value: DatabaseValueConvertible & StatementColumnConvertible } } +// Explicit non-conformance to Sendable: database cursors must be used from +// a serialized database access dispatch queue. +@available(*, unavailable) +extension FastDatabaseValueCursor: Sendable { } + /// Types that adopt both DatabaseValueConvertible and /// StatementColumnConvertible can be efficiently initialized from /// database values. 
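Stepping back to the statement-argument changes above: the two execution paths (temporary `SQLITE_STATIC` bindings for one-shot arguments, persistent `SQLITE_TRANSIENT` copies after `setArguments(_:)`) can be exercised from the public API. A minimal sketch, assuming a `player` table with a `name` column:

```swift
import GRDB

try dbQueue.write { db in
    // Reuse one prepared statement with one-shot arguments: each
    // execution may bind the string argument temporarily, without
    // copying it.
    let statement = try db.cachedStatement(sql: """
        INSERT INTO player (name) VALUES (?)
        """)
    for name in ["Arthur", "Barbara", "Craig"] {
        try statement.execute(arguments: [name])
    }

    // Persistent arguments: copied with SQLITE_TRANSIENT, and
    // reused by subsequent executions.
    try statement.setArguments(["Diane"])
    try statement.execute()
}
```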
diff --git a/GRDB/Core/Support/Foundation/Data.swift b/GRDB/Core/Support/Foundation/Data.swift index 21b647ae5a..6827b71d9c 100644 --- a/GRDB/Core/Support/Foundation/Data.swift +++ b/GRDB/Core/Support/Foundation/Data.swift @@ -42,6 +42,23 @@ extension Data: DatabaseValueConvertible, StatementColumnConvertible { sqlite3_bind_blob(sqliteStatement, index, $0.baseAddress, CInt($0.count), SQLITE_TRANSIENT) } } + + /// Calls the given closure after binding a statement argument. + /// + /// The binding is valid only during the execution of this method. + /// + /// - parameter sqliteStatement: An SQLite statement. + /// - parameter index: 1-based index to statement arguments. + /// - parameter body: The closure to execute when argument is bound. + func withBinding<T>(to sqliteStatement: SQLiteStatement, at index: CInt, do body: () throws -> T) throws -> T { + try withUnsafeBytes { + let code = sqlite3_bind_blob( + sqliteStatement, index, + $0.baseAddress, CInt($0.count), nil /* SQLITE_STATIC */) + try checkBindingSuccess(code: code, sqliteStatement: sqliteStatement) + return try body() + } + } } // MARK: - Conversions diff --git a/GRDB/Core/Support/Foundation/DatabaseDateComponents.swift b/GRDB/Core/Support/Foundation/DatabaseDateComponents.swift index fd31c3724b..d4ebba478a 100644 --- a/GRDB/Core/Support/Foundation/DatabaseDateComponents.swift +++ b/GRDB/Core/Support/Foundation/DatabaseDateComponents.swift @@ -1,10 +1,10 @@ import Foundation /// A database value that holds date components. -public struct DatabaseDateComponents { +public struct DatabaseDateComponents: Sendable { /// The SQLite formats for date components. - public enum Format: String { + public enum Format: String, Sendable { /// The format "yyyy-MM-dd". case YMD = "yyyy-MM-dd" @@ -75,7 +75,7 @@ extension DatabaseDateComponents: StatementColumnConvertible { } let length = Int(sqlite3_column_bytes(sqliteStatement, index)) // avoid a strlen let components = cString.withMemoryRebound( - to: Int8.self, + to: CChar.self, capacity: length + 1 /* trailing \0 */) { cString in SQLiteDateParser().components(cString: cString, length: length) } diff --git a/GRDB/Core/Support/Foundation/SQLiteDateParser.swift b/GRDB/Core/Support/Foundation/SQLiteDateParser.swift index 74db8e5fdf..4f234749dc 100644 --- a/GRDB/Core/Support/Foundation/SQLiteDateParser.swift +++ b/GRDB/Core/Support/Foundation/SQLiteDateParser.swift @@ -4,6 +4,7 @@ import Foundation @usableFromInline struct SQLiteDateParser { + // swiftlint:disable:next unneeded_synthesized_initializer @usableFromInline init() { } diff --git a/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Decodable.swift b/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Decodable.swift index 0ac8736c61..9e680f66be 100644 --- a/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Decodable.swift +++ b/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Decodable.swift @@ -136,7 +136,7 @@ private struct DatabaseValueDecodingContainer: SingleValueDecodingContainer { /// cannot be converted to the requested type. /// - throws: `DecodingError.valueNotFound` if the encountered encoded value is null. func decode<T>(_ type: T.Type) throws -> T where T: Decodable { - if let type = T.self as? DatabaseValueConvertible.Type { + if let type = T.self as? any DatabaseValueConvertible.Type { // Prefer DatabaseValueConvertible decoding over Decodable. // This allows custom database decoding, such as decoding Date from // String, for example.
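The preference for `DatabaseValueConvertible` decoding described in the comment above is what lets `Codable` records mix database-native and JSON-encoded properties. A sketch, with hypothetical `Player` and `Address` types:

```swift
import Foundation
import GRDB

// A plain Codable type: stored and decoded as a JSON string.
struct Address: Codable {
    var street: String
    var city: String
}

struct Player: Codable, FetchableRecord, PersistableRecord {
    var name: String
    // Date is DatabaseValueConvertible: decoded from database dates,
    // not through Decodable.
    var registeredAt: Date
    // Address is not DatabaseValueConvertible: encoded and decoded
    // through JSONEncoder/JSONDecoder.
    var address: Address
}
```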
@@ -179,11 +179,7 @@ extension DatabaseValueConvertible where Self: Decodable { guard let data = Data.fromDatabaseValue(databaseValue) else { return nil } - let decoder = JSONDecoder() - decoder.dataDecodingStrategy = .base64 - decoder.dateDecodingStrategy = .millisecondsSince1970 - decoder.nonConformingFloatDecodingStrategy = .throw - return try? decoder.decode(Self.self, from: data) + return try? databaseJSONDecoder().decode(Self.self, from: data) } catch { return nil } diff --git a/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Encodable.swift b/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Encodable.swift index f9f7127d8a..7735a8fb68 100644 --- a/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Encodable.swift +++ b/GRDB/Core/Support/StandardLibrary/DatabaseValueConvertible+Encodable.swift @@ -2,6 +2,7 @@ import Foundation private struct DatabaseValueEncodingContainer: SingleValueEncodingContainer { let encode: (DatabaseValue) -> Void + let jsonEncoder: JSONEncoder var codingPath: [any CodingKey] { [] } @@ -42,16 +43,21 @@ private struct DatabaseValueEncodingContainer: SingleValueEncodingContainer { // This allows us to encode Date as String, for example. encode(dbValueConvertible.databaseValue) } else { - try DatabaseValueEncoder(encode: encode).encode(value) + try DatabaseValueEncoder(jsonEncoder: jsonEncoder, encode: encode).encode(value) } } } private class DatabaseValueEncoder: Encoder { let encode: (DatabaseValue) -> Void + let jsonEncoder: JSONEncoder var requiresJSON = false - init(encode: @escaping (DatabaseValue) -> Void) { + init( + jsonEncoder: JSONEncoder, + encode: @escaping (DatabaseValue) -> Void + ) { + self.jsonEncoder = jsonEncoder self.encode = encode } @@ -104,7 +110,7 @@ private class DatabaseValueEncoder: Encoder { /// - precondition: May not be called after a value has been encoded through /// a previous `self.singleValueContainer()` call. func singleValueContainer() -> SingleValueEncodingContainer { - DatabaseValueEncodingContainer(encode: encode) + DatabaseValueEncodingContainer(encode: encode, jsonEncoder: jsonEncoder) } func encode<T: Encodable>(_ value: T) throws { @@ -115,13 +121,7 @@ private class DatabaseValueEncoder: Encoder { throw JSONRequiredError() } } catch is JSONRequiredError { - let encoder = JSONEncoder() - encoder.dataEncodingStrategy = .base64 - encoder.dateEncodingStrategy = .millisecondsSince1970 - encoder.nonConformingFloatEncodingStrategy = .throw - // guarantee some stability in order to ease value comparison - encoder.outputFormatting = .sortedKeys - let jsonData = try encoder.encode(value) + let jsonData = try jsonEncoder.encode(value) // Store JSON String in the database for easier debugging and // database inspection. Thanks to SQLite weak typing, we won't @@ -138,7 +138,11 @@ private class DatabaseValueEncoder: Encoder { extension DatabaseValueConvertible where Self: Encodable { public var databaseValue: DatabaseValue { var dbValue: DatabaseValue! = nil - try! DatabaseValueEncoder(encode: { dbValue = $0 }).encode(self) + try!
DatabaseValueEncoder( + jsonEncoder: Self.databaseJSONEncoder(), + encode: { dbValue = $0 } + ) + .encode(self) return dbValue } } diff --git a/GRDB/Core/Support/StandardLibrary/StandardLibrary.swift b/GRDB/Core/Support/StandardLibrary/StandardLibrary.swift index d3528bbcad..021beb4926 100644 --- a/GRDB/Core/Support/StandardLibrary/StandardLibrary.swift +++ b/GRDB/Core/Support/StandardLibrary/StandardLibrary.swift @@ -610,6 +610,21 @@ extension String: DatabaseValueConvertible, StatementColumnConvertible { public func bind(to sqliteStatement: SQLiteStatement, at index: CInt) -> CInt { sqlite3_bind_text(sqliteStatement, index, self, -1, SQLITE_TRANSIENT) } + + /// Calls the given closure after binding a statement argument. + /// + /// The binding is valid only during the execution of this method. + /// + /// - parameter sqliteStatement: An SQLite statement. + /// - parameter index: 1-based index to statement arguments. + /// - parameter body: The closure to execute when argument is bound. + func withBinding<T>(to sqliteStatement: SQLiteStatement, at index: CInt, do body: () throws -> T) throws -> T { + try withCString { + let code = sqlite3_bind_text(sqliteStatement, index, $0, -1, nil /* SQLITE_STATIC */) + try checkBindingSuccess(code: code, sqliteStatement: sqliteStatement) + return try body() + } + } } diff --git a/GRDB/Core/TransactionClock.swift b/GRDB/Core/TransactionClock.swift new file mode 100644 index 0000000000..d878cd4504 --- /dev/null +++ b/GRDB/Core/TransactionClock.swift @@ -0,0 +1,65 @@ +import Foundation + +/// A type that provides the moment of a transaction. +/// +/// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) +/// +/// ## Topics +/// +/// ### Built-in Clocks +/// +/// - ``DefaultTransactionClock`` +/// - ``CustomTransactionClock`` +public protocol TransactionClock { + /// Returns the date of the current transaction. + /// + /// This function is called whenever a transaction starts - precisely + /// speaking, whenever the database connection leaves the auto-commit mode. + /// + /// It is also called when the ``Database/transactionDate`` property is + /// called, and the database connection is not in a transaction. + /// + /// Related SQLite documentation: + func now(_ db: Database) throws -> Date +} + +extension TransactionClock where Self == DefaultTransactionClock { + /// Returns the default clock. + public static var `default`: Self { DefaultTransactionClock() } +} + +extension TransactionClock where Self == CustomTransactionClock { + /// Returns a custom clock. + /// + /// The provided closure is called whenever a transaction starts - precisely + /// speaking, whenever the database connection leaves the auto-commit mode. + /// + /// It is also called when the ``Database/transactionDate`` property is + /// called, and the database connection is not in a transaction. + public static func custom(_ now: @escaping (Database) throws -> Date) -> Self { + CustomTransactionClock(now) + } +} + +/// The default transaction clock. +public struct DefaultTransactionClock: TransactionClock { + /// Returns the start date of the current transaction. + public func now(_ db: Database) throws -> Date { + // An opportunity to fetch transaction time from the database when + // SQLite supports the feature. + Date() + } +} + +/// A custom transaction clock.
+public struct CustomTransactionClock: TransactionClock { + let _now: (Database) throws -> Date + + public init(_ now: @escaping (Database) throws -> Date) { + self._now = now + } + + public func now(_ db: Database) throws -> Date { + try _now(db) + } +} diff --git a/GRDB/Core/TransactionObserver.swift b/GRDB/Core/TransactionObserver.swift index e16a0f12d4..d45b3b1206 100644 --- a/GRDB/Core/TransactionObserver.swift +++ b/GRDB/Core/TransactionObserver.swift @@ -2,11 +2,20 @@ extension Database { // MARK: - Database Observation - /// Adds a transaction observer, so that it gets notified of - /// database changes and transactions. + /// Adds a transaction observer on the database connection, so that it + /// gets notified of database changes and transactions. /// /// This method has no effect on read-only database connections. /// + /// For example: + /// + /// ```swift + /// let myObserver = MyObserver() + /// try dbQueue.write { db in + /// db.add(transactionObserver: myObserver) + /// } + /// ``` + /// /// - parameter transactionObserver: A transaction observer. /// - parameter extent: The duration of the observation. The default is /// the observer lifetime (observation lasts until observer @@ -20,21 +29,30 @@ extension Database { // Drop cached statements that delete, because the addition of an // observer may change the need for truncate optimization prevention. - publicStatementCache.removeAll { $0.isDeleteStatement } - internalStatementCache.removeAll { $0.isDeleteStatement } + publicStatementCache.removeAll { $0.canDeleteRows } + internalStatementCache.removeAll { $0.canDeleteRows } observationBroker.add(transactionObserver: transactionObserver, extent: extent) } - /// Removes a transaction observer. + /// Removes a transaction observer from the database connection. + /// + /// For example: + /// + /// ```swift + /// let myObserver = MyObserver() + /// try dbQueue.write { db in + /// db.remove(transactionObserver: myObserver) + /// } + /// ``` public func remove(transactionObserver: some TransactionObserver) { SchedulingWatchdog.preconditionValidQueue(self) guard let observationBroker else { return } // Drop cached statements that delete, because the removal of an // observer may change the need for truncate optimization prevention. - publicStatementCache.removeAll { $0.isDeleteStatement } - internalStatementCache.removeAll { $0.isDeleteStatement } + publicStatementCache.removeAll { $0.canDeleteRows } + internalStatementCache.removeAll { $0.canDeleteRows } observationBroker.remove(transactionObserver: transactionObserver) } @@ -117,7 +135,7 @@ extension Database { } /// The extent of the observation performed by a ``TransactionObserver``. - public enum TransactionObservationExtent { + public enum TransactionObservationExtent: Sendable { /// Observation lasts until observer is deallocated. case observerLifetime /// Observation lasts until the next transaction. 
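A usage sketch for the clocks introduced above, assuming the companion `Configuration.transactionClock` setting that this experimental protocol is designed for (an in-memory `DatabaseQueue` keeps the example self-contained):

```swift
import Foundation
import GRDB

var config = Configuration()
// Freeze the transaction date, e.g. for reproducible tests.
config.transactionClock = .custom { _ in Date(timeIntervalSince1970: 0) }

let dbQueue = try DatabaseQueue(configuration: config)
try dbQueue.write { db in
    // All dates within the transaction agree with the custom clock.
    let date = try db.transactionDate
    print(date) // 1970-01-01 00:00:00 +0000
}
```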
@@ -264,6 +282,20 @@ class DatabaseObservationBroker { } } + func notifyChanges(withEventsOfKind eventKinds: [DatabaseEventKind]) throws { + // Support for stopObservingDatabaseChangesUntilNextTransaction() + SchedulingWatchdog.current!.databaseObservationBroker = self + defer { + SchedulingWatchdog.current!.databaseObservationBroker = nil + } + + for observation in transactionObservations where observation.isEnabled { + if eventKinds.contains(where: { observation.observes(eventsOfKind: $0) }) { + observation.databaseDidChange() + } + } + } + // MARK: - Statement execution /// Returns true if there exists some transaction observer interested in @@ -454,7 +486,7 @@ class DatabaseObservationBroker { if savepointStack.isEmpty { // Notify now - for statementObservation in statementObservations where statementObservation.predicate.evaluate(event) { + for statementObservation in statementObservations where statementObservation.tracksEvent(event) { statementObservation.transactionObservation.databaseWillChange(with: event) } } else { @@ -479,7 +511,7 @@ class DatabaseObservationBroker { if savepointStack.isEmpty { // Notify now - for statementObservation in statementObservations where statementObservation.predicate.evaluate(event) { + for statementObservation in statementObservations where statementObservation.tracksEvent(event) { statementObservation.transactionObservation.databaseDidChange(with: event) } } else { @@ -548,6 +580,11 @@ class DatabaseObservationBroker { // even if we actually execute an empty deferred transaction. // // For better or for worse, let's simulate a transaction: + // + // 2023-11-26: I'm glad we did, because that's how we support calls + // to `Database.notifyChanges(in:)` from an empty transaction, as a + // way to tell transaction observers about changes performed by some + // external connection. do { try databaseWillCommit() @@ -639,7 +676,7 @@ class DatabaseObservationBroker { for (event, statementObservations) in eventsBuffer { assert(statementObservations.isEmpty || !database.isReadOnly, "Read-only transactions are not notified") - for statementObservation in statementObservations where statementObservation.predicate.evaluate(event) { + for statementObservation in statementObservations where statementObservation.tracksEvent(event) { event.send(to: statementObservation.transactionObservation) } } @@ -739,311 +776,6 @@ class DatabaseObservationBroker { // MARK: - TransactionObserver -/// A type that tracks database changes and transactions performed in -/// a database. -/// -/// ## Overview -/// -/// `TransactionObserver` is the low-level protocol that supports all -/// features. -/// -/// A transaction observer is notified of individual changes (inserts, updates -/// and deletes), before they are committed to disk, as well as transaction -/// commits and rollbacks. -/// -/// ## Activate a Transaction Observer -/// -/// An observer starts receiving change notifications after it has been added to -/// a database connection with the -/// ``DatabaseWriter/add(transactionObserver:extent:)`` `DatabaseWriter` method, -/// or the ``Database/add(transactionObserver:extent:)`` `Database` method: -/// -/// ```swift -/// let observer = MyObserver() -/// dbQueue.add(transactionObserver: observer) -/// ``` -/// -/// By default, database holds weak references to its transaction observers: -/// they are not retained, and stop getting notifications after they are -/// deallocated. See for -/// more options. 
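The broker method above backs the `Database.notifyChanges(in:)` API referenced in the 2023-11-26 comment. A sketch of the call site, assuming a `player` table and `Table`'s conformance to `DatabaseRegionConvertible`:

```swift
try dbQueue.write { db in
    // Some other process changed the player table behind our back.
    // Tell observers about it: those whose observes(eventsOfKind:)
    // matches the player table receive databaseDidChange().
    try db.notifyChanges(in: Table("player"))
}
```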
-/// -/// ## Database Changes And Transactions -/// -/// Database changes are notified to the ``databaseDidChange(with:)`` callback. -/// This includes indirect changes triggered by `ON DELETE` and `ON UPDATE` -/// actions associated to -/// [foreign keys](https://www.sqlite.org/foreignkeys.html#fk_actions), and -/// [SQL triggers](https://www.sqlite.org/lang_createtrigger.html). -/// -/// Transaction completions are notified to the ``databaseWillCommit()-7mksu``, -/// ``databaseDidCommit(_:)`` and ``databaseDidRollback(_:)`` callbacks. -/// -/// Read-only transactions, changes and transactions performed by external -/// database connections, changes to internal system tables (such as -/// `sqlite_master`), changes to -/// [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables, and the -/// deletion of duplicate rows triggered by -/// [`ON CONFLICT REPLACE`](https://www.sqlite.org/lang_conflict.html) clauses -/// (this last exception might change in a future release of SQLite) are -/// not notified. -/// -/// Notified changes are not actually written to disk until the transaction -/// commits, and the `databaseDidCommit` callback is called. On the other -/// side, `databaseDidRollback` confirms their invalidation: -/// -/// ```swift -/// try dbQueue.write { db in -/// try db.execute(sql: "INSERT ...") // 1. didChange -/// try db.execute(sql: "UPDATE ...") // 2. didChange -/// } // 3. willCommit, 4. didCommit -/// -/// try dbQueue.inTransaction { db in -/// try db.execute(sql: "INSERT ...") // 1. didChange -/// try db.execute(sql: "UPDATE ...") // 2. didChange -/// return .rollback // 3. didRollback -/// } -/// -/// try dbQueue.write { db in -/// try db.execute(sql: "INSERT ...") // 1. didChange -/// throw SomeError() -/// } // 2. didRollback -/// ``` -/// -/// Database statements that are executed outside of any explicit transaction -/// do not drop off the radar: -/// -/// ```swift -/// try dbQueue.writeWithoutTransaction { db in -/// try db.execute(sql: "INSERT ...") // 1. didChange, 2. willCommit, 3. didCommit -/// try db.execute(sql: "UPDATE ...") // 4. didChange, 5. willCommit, 6. didCommit -/// } -/// ``` -/// -/// Changes that are on hold because of a -/// [savepoint](https://www.sqlite.org/lang_savepoint.html) are only notified -/// after the savepoint has been released. This makes sure that notified events -/// are only those that have an opportunity to be committed: -/// -/// ```swift -/// try dbQueue.inTransaction { db in -/// try db.execute(sql: "INSERT ...") // 1. didChange -/// -/// try db.execute(sql: "SAVEPOINT foo") -/// try db.execute(sql: "UPDATE ...") // delayed -/// try db.execute(sql: "UPDATE ...") // delayed -/// try db.execute(sql: "RELEASE SAVEPOINT foo") // 2. didChange, 3. didChange -/// -/// try db.execute(sql: "SAVEPOINT bar") -/// try db.execute(sql: "UPDATE ...") // not notified -/// try db.execute(sql: "ROLLBACK TO SAVEPOINT bar") -/// try db.execute(sql: "RELEASE SAVEPOINT bar") -/// -/// return .commit // 4. willCommit, 5. didCommit -/// } -/// ``` -/// -/// Eventual errors thrown from `databaseWillCommit` are exposed to the -/// application code: -/// -/// ```swift -/// do { -/// try dbQueue.inTransaction { db in -/// ... -/// return .commit // 1. willCommit (throws), 2. didRollback -/// } -/// } catch { -/// // 3. The error thrown by the transaction observer. -/// } -/// ``` -/// -/// - Note: All callbacks are called in the writer dispatch queue, and -/// serialized with all database updates. 
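A compact observer assembled from the callbacks described above (a sketch; the printed messages are illustrative):

```swift
import GRDB

final class ChangeLogger: TransactionObserver {
    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool {
        true // interested in all changes
    }

    func databaseDidChange(with event: DatabaseEvent) {
        // Must not touch the database here.
        print("\(event.kind) in \(event.tableName) (rowid \(event.rowID))")
    }

    func databaseDidCommit(_ db: Database) {
        print("commit") // may use db
    }

    func databaseDidRollback(_ db: Database) {
        print("rollback")
    }
}
```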
-/// -/// - Note: The `databaseDidChange` and `databaseWillCommit` callbacks must not -/// touch the SQLite database. This limitation does not apply to -/// `databaseDidCommit` and `databaseDidRollback` which can use their -/// database argument. -/// -/// ## Filtering Database Events -/// -/// **Transaction observers can choose the database changes they are -/// interested in.** -/// -/// The filtering happens in the ``observes(eventsOfKind:)`` method, which tells -/// whether the observer wants notifications of specific kinds of changes, -/// or not. -/// -/// The ``DatabaseEventKind`` argument of `observes(eventsOfKind:)` can -/// distinguish insertions from deletions and updates, and is also able to -/// tell the columns that are about to be changed. -/// -/// For example, an observer can focus on the changes that happen on the -/// "player" database table only: -/// -/// ```swift -/// class PlayerObserver: TransactionObserver { -/// func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { -/// // Only observe changes to the "player" table. -/// eventKind.tableName == "player" -/// } -/// -/// func databaseDidChange(with event: DatabaseEvent) { -/// // This method is only called for changes that happen to -/// // the "player" table. -/// } -/// } -/// ``` -/// -/// When the `observes(eventsOfKind:)` method returns false for all event kinds, -/// the observer is still notified of transactions. -/// -/// ## Observation Extent -/// -/// **You can specify how long an observer is notified of database changes -/// and transactions.** -/// -/// The `remove(transactionObserver:)` method explicitly stops notifications, -/// at any time: -/// -/// ```swift -/// // From a database queue or pool: -/// dbQueue.remove(transactionObserver: observer) -/// -/// // From a database connection: -/// dbQueue.inDatabase { db in -/// db.remove(transactionObserver: observer) -/// } -/// ``` -/// -/// Alternatively, use the `extent` parameter of the -/// `add(transactionObserver:extent:)` method: -/// -/// ```swift -/// let observer = MyObserver() -/// -/// // On a database queue or pool: -/// dbQueue.add(transactionObserver: observer) // default extent -/// dbQueue.add(transactionObserver: observer, extent: .observerLifetime) -/// dbQueue.add(transactionObserver: observer, extent: .nextTransaction) -/// dbQueue.add(transactionObserver: observer, extent: .databaseLifetime) -/// -/// // On a database connection: -/// dbQueue.inDatabase { db in -/// db.add(transactionObserver: ...) -/// } -/// ``` -/// -/// - The default extent is `.observerLifetime`: the database holds a weak -/// reference to the observer, and the observation automatically ends when the -/// observer is deallocated. Meanwhile, the observer is notified of all changes -/// and transactions. -/// -/// - `.nextTransaction` activates the observer until the current or next -/// transaction completes. The database keeps a strong reference to the observer -/// until its `databaseDidCommit` or `databaseDidRollback` callback is called. -/// Hereafter the observer won't get any further notification. -/// -/// - `.databaseLifetime` has the database retain and notify the observer until -/// the database connection is closed. -/// -/// Finally, an observer can avoid processing database changes until the end -/// of the current transaction. 
After -/// ``stopObservingDatabaseChangesUntilNextTransaction()``, the -/// `databaseDidChange` callback will not be called until the current -/// transaction completes: -/// -/// ```swift -/// class PlayerObserver: TransactionObserver { -/// var playerTableWasModified = false -/// -/// func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { -/// eventKind.tableName == "player" -/// } -/// -/// func databaseDidChange(with event: DatabaseEvent) { -/// playerTableWasModified = true -/// -/// // It is pointless to keep on tracking further changes: -/// stopObservingDatabaseChangesUntilNextTransaction() -/// } -/// } -/// ``` -/// -/// ### Support for SQLite Pre-Update Hooks -/// -/// When SQLite is built with the `SQLITE_ENABLE_PREUPDATE_HOOK` option, -/// `TransactionObserver` gets an extra callback which lets you observe -/// individual column values in the rows modified by a transaction: -/// -/// ```swift -/// protocol TransactionObserver: AnyObject { -/// #if SQLITE_ENABLE_PREUPDATE_HOOK -/// /// Notifies before a database change (insert, update, or delete) -/// /// with change information (initial / final values for the row's -/// /// columns). -/// /// -/// /// The event is only valid for the duration of this method call. If you -/// /// need to keep it longer, store a copy: event.copy(). -/// func databaseWillChange(with event: DatabasePreUpdateEvent) -/// #endif -/// } -/// ``` -/// -/// This extra API can be activated in two ways: -/// -/// 1. Use the GRDB.swift CocoaPod with a custom compilation option, as below. -/// It uses the system SQLite, which is compiled with -/// `SQLITE_ENABLE_PREUPDATE_HOOK` support, but only on iOS 11.0+ (we don't -/// know the minimum version of macOS, tvOS, watchOS): -/// -/// ```ruby -/// pod 'GRDB.swift' -/// platform :ios, '11.0' # or above -/// -/// post_install do |installer| -/// installer.pods_project.targets.select { |target| target.name == "GRDB.swift" }.each do |target| -/// target.build_configurations.each do |config| -/// # Enable extra GRDB APIs -/// config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_PREUPDATE_HOOK" -/// # Enable extra SQLite APIs -/// config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = "$(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1" -/// end -/// end -/// end -/// ``` -/// -/// **Warning**: make sure you use the right platform version! You will get -/// runtime errors on devices with a lower version. -/// -/// **Note**: the `GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1` option in -/// `GCC_PREPROCESSOR_DEFINITIONS` defines some C function prototypes that -/// are lacking from the system `<sqlite3.h>` header. When Xcode eventually -/// ships with an SDK that includes a complete header, you may get a -/// compiler error about duplicate function definitions. When this happens, -/// just remove this `GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1` option. -/// -/// 2. Use a [custom SQLite build](http://github.com/groue/GRDB.swift/blob/master/Documentation/CustomSQLiteBuilds.md) -/// and activate the `SQLITE_ENABLE_PREUPDATE_HOOK` compilation option.
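A sketch of an observer that uses the pre-update callback, under the `SQLITE_ENABLE_PREUPDATE_HOOK` setup described above:

```swift
#if SQLITE_ENABLE_PREUPDATE_HOOK
final class AuditObserver: TransactionObserver {
    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { true }
    func databaseDidChange(with event: DatabaseEvent) { }
    func databaseDidCommit(_ db: Database) { }
    func databaseDidRollback(_ db: Database) { }

    func databaseWillChange(with event: DatabasePreUpdateEvent) {
        // The event is only valid for the duration of this callback:
        // copy it if it must be stored.
        let copy = event.copy()
        print("\(copy.kind) in \(copy.tableName)")
    }
}
#endif
```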
-/// -/// ## Topics -/// -/// ### Filtering Database Changes -/// -/// - ``observes(eventsOfKind:)`` -/// - ``DatabaseEventKind`` -/// -/// ### Handling Database Changes -/// -/// - ``databaseDidChange(with:)`` -/// - ``stopObservingDatabaseChangesUntilNextTransaction()`` -/// - ``DatabaseEvent`` -/// -/// ### Handling Transactions -/// -/// - ``databaseWillCommit()-7mksu`` -/// - ``databaseDidCommit(_:)`` -/// - ``databaseDidRollback(_:)`` public protocol TransactionObserver: AnyObject { /// Returns whether specific kinds of database changes should be notified @@ -1066,9 +798,19 @@ public protocol TransactionObserver: AnyObject { /// When this method returns true for deletion events, the observer /// prevents the /// [truncate optimization](https://www.sqlite.org/lang_delete.html#the_truncate_optimization) - /// from being applied. + /// from being applied on the observed tables. func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool + /// Called when the database was modified in some unspecified way. + /// + /// This method allows a transaction observer to handle changes that are + /// not automatically detected. See + /// ``Database/notifyChanges(in:)`` for more information. + /// + /// The exact nature of changes is unknown, but they comply with the + /// ``observes(eventsOfKind:)`` test. + func databaseDidChange() + /// Called when the database is changed by an insert, update, or /// delete event. /// @@ -1083,7 +825,8 @@ public protocol TransactionObserver: AnyObject { /// - note: The event is only valid for the duration of this method call. /// If you need to keep it longer, store a copy: `event.copy()`. /// - /// - precondition: This method must not access the database. + /// - precondition: This method must not access the observed writer + /// database connection. func databaseDidChange(with event: DatabaseEvent) /// Called when a transaction is about to be committed. /// /// The transaction observer has an opportunity to rollback pending changes /// by throwing an error from this method. /// - /// - precondition: This method must not access the database. + /// - precondition: This method must not access the observed writer + /// database connection. /// - throws: The eventual error that rolls back pending changes. func databaseWillCommit() throws @@ -1125,7 +869,7 @@ public protocol TransactionObserver: AnyObject { /// /// Requires SQLite compiled with option SQLITE_ENABLE_PREUPDATE_HOOK. /// - /// As of OSX 10.11.5, and iOS 9.3.2, the built-in SQLite library + /// As of macOS 10.11.5, and iOS 9.3.2, the built-in SQLite library /// does not have this enabled, so you'll need to compile your own /// version of SQLite: /// See @@ -1142,11 +886,15 @@ extension TransactionObserver { public func databaseWillChange(with event: DatabasePreUpdateEvent) { } #endif - /// Prevents the observer from receiving further change notifications until - /// the next transaction. + /// The default implementation does nothing. + public func databaseDidChange() { } + + /// Prevents the observer from receiving further change notifications + /// until the next transaction. /// /// After this method has been called, the ``databaseDidChange(with:)`` - /// method won't be called until the next transaction. + /// and ``databaseDidChange()-7olv7`` methods won't be called until the + /// next transaction.
/// /// For example: /// /// ```swift @@ -1159,6 +907,13 @@ extension TransactionObserver { /// return eventKind.tableName == "player" /// } /// + /// func databaseDidChange() { + /// playerTableWasModified = true + /// + /// // It is pointless to keep on tracking further changes: + /// stopObservingDatabaseChangesUntilNextTransaction() + /// } + /// /// func databaseDidChange(with event: DatabaseEvent) { /// playerTableWasModified = true /// @@ -1169,12 +924,12 @@ extension TransactionObserver { /// ``` /// /// - precondition: This method must be called from - /// ``databaseDidChange(with:)``. + /// ``databaseDidChange(with:)`` or ``databaseDidChange()-7olv7``. public func stopObservingDatabaseChangesUntilNextTransaction() { guard let broker = SchedulingWatchdog.current?.databaseObservationBroker else { fatalError(""" stopObservingDatabaseChangesUntilNextTransaction must be called \ - from the databaseDidChange method + from the `databaseDidChange()` or `databaseDidChange(with:)` methods """) } broker.disableUntilNextTransaction(transactionObserver: self) @@ -1227,6 +982,11 @@ final class TransactionObservation { } #endif + func databaseDidChange() { + guard isEnabled else { return } + observer?.databaseDidChange() + } + func databaseDidChange(with event: DatabaseEvent) { guard isEnabled else { return } observer?.databaseDidChange(with: event) @@ -1274,12 +1034,18 @@ final class TransactionObservation { struct StatementObservation { var transactionObservation: TransactionObservation - /// Filters database events that should be notified. - var predicate: DatabaseEventPredicate + /// A predicate that filters database events that should be notified. + /// + /// Call this predicate as a method: + /// + /// ``` + /// if observation.tracksEvent(event) { ... } + /// ``` + var tracksEvent: DatabaseEventPredicate init(transactionObservation: TransactionObservation, trackingEvents predicate: DatabaseEventPredicate) { self.transactionObservation = transactionObservation - self.predicate = predicate + self.tracksEvent = predicate } } @@ -1290,7 +1056,7 @@ struct StatementObservation { /// See the ``TransactionObserver/observes(eventsOfKind:)`` method in the /// ``TransactionObserver`` protocol for more information. @frozen -public enum DatabaseEventKind { +public enum DatabaseEventKind: Sendable { /// The insertion of a row in a database table. case insert(tableName: String) @@ -1343,7 +1109,7 @@ protocol DatabaseEventProtocol { /// ``TransactionObserver`` protocol for more information. public struct DatabaseEvent { /// An event kind. - public enum Kind: CInt { + public enum Kind: CInt, Sendable { /// An insertion event case insert = 18 // SQLITE_INSERT @@ -1392,7 +1158,12 @@ public struct DatabaseEvent { self.impl = impl } - init(kind: Kind, rowID: Int64, databaseNameCString: UnsafePointer<Int8>?, tableNameCString: UnsafePointer<Int8>?) { + init( + kind: Kind, + rowID: Int64, + databaseNameCString: UnsafePointer<CChar>?, + tableNameCString: UnsafePointer<CChar>?) + { self.init( kind: kind, rowID: rowID, @@ -1402,6 +1173,11 @@ public struct DatabaseEvent { } } +// Explicit non-conformance to Sendable: this type can't be made Sendable +// until GRDB7 where we can distinguish between a transient event and its copy.
+@available(*, unavailable) +extension DatabaseEvent: Sendable { } + extension DatabaseEvent: DatabaseEventProtocol { func send(to observer: TransactionObservation) { observer.databaseDidChange(with: self) @@ -1428,8 +1204,8 @@ private protocol DatabaseEventImpl { /// Optimization: MetalDatabaseEventImpl does not create Swift strings from raw /// SQLite char* until actually asked for databaseName or tableName. private struct MetalDatabaseEventImpl: DatabaseEventImpl { - let databaseNameCString: UnsafePointer<Int8>? - let tableNameCString: UnsafePointer<Int8>? + let databaseNameCString: UnsafePointer<CChar>? + let tableNameCString: UnsafePointer<CChar>? var databaseName: String { String(cString: databaseNameCString!) } var tableName: String { String(cString: tableNameCString!) } @@ -1482,7 +1258,7 @@ public struct DatabasePreUpdateEvent { /// The triggering depth of the row update /// Returns: /// 0 if the preupdate callback was invoked as a result of a direct insert, - // update, or delete operation; + /// update, or delete operation; /// 1 for inserts, updates, or deletes invoked by top-level triggers; /// 2 for changes resulting from triggers called by top-level triggers; /// ... and so forth @@ -1564,8 +1340,8 @@ public struct DatabasePreUpdateEvent { kind: Kind, initialRowID: Int64, finalRowID: Int64, - databaseNameCString: UnsafePointer<Int8>?, - tableNameCString: UnsafePointer<Int8>?) + databaseNameCString: UnsafePointer<CChar>?, + tableNameCString: UnsafePointer<CChar>?) { self.init( kind: kind, @@ -1621,8 +1397,8 @@ private struct MetalDatabasePreUpdateEventImpl: DatabasePreUpdateEventImpl { let connection: SQLiteConnection let kind: DatabasePreUpdateEvent.Kind - let databaseNameCString: UnsafePointer<Int8>? - let tableNameCString: UnsafePointer<Int8>? + let databaseNameCString: UnsafePointer<CChar>? + let tableNameCString: UnsafePointer<CChar>? var databaseName: String { String(cString: databaseNameCString!) } var tableName: String { String(cString: tableNameCString!) } @@ -1699,7 +1475,7 private struct MetalDatabasePreUpdateEventImpl: DatabasePreUpdateEventImpl { { var value: SQLiteValue? = nil guard sqlite_func(connection, column, &value) == SQLITE_OK else { return nil } - if let value = value { + if let value { return DatabaseValue(sqliteValue: value) } return nil @@ -1752,7 +1528,7 @@ enum DatabaseEventPredicate { /// statement authorizer. case matching(observedEventKinds: [DatabaseEventKind], authorizerEventKinds: [DatabaseEventKind]) - func evaluate(_ event: some DatabaseEventProtocol) -> Bool { + func callAsFunction(_ event: some DatabaseEventProtocol) -> Bool { switch self { case .all: return true diff --git a/GRDB/Core/WALSnapshot.swift b/GRDB/Core/WALSnapshot.swift index 7ce2910613..4d50c67e08 100644 --- a/GRDB/Core/WALSnapshot.swift +++ b/GRDB/Core/WALSnapshot.swift @@ -1,3 +1,5 @@ +// swiftlint:disable:next line_length +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) /// An instance of WALSnapshot records the state of a WAL mode database for some /// specific point in history. /// @@ -19,14 +21,9 @@ /// Yes, this is an awfully complex logic. /// /// See . -final class WALSnapshot: Sendable { - // Xcode 14 (Swift 5.7) ships with a macOS SDK that misses snapshot support. - // Xcode 14.1 (Swift 5.7.1) ships with a macOS SDK that has snapshot support.
- // This is the meaning of (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst))) - // swiftlint:disable:next line_length -#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) - static let available = true - +final class WALSnapshot: @unchecked Sendable { + // @unchecked because sqlite3_snapshot has no threading requirements. + // let sqliteSnapshot: UnsafeMutablePointer<sqlite3_snapshot> init(_ db: Database) throws { @@ -35,11 +32,30 @@ final class WALSnapshot: Sendable { return sqlite3_snapshot_get(db.sqliteConnection, "main", $0) } guard code == SQLITE_OK else { + // + // + // > The following must be true for sqlite3_snapshot_get() to succeed. [...] + // > + // > 1. The database handle must not be in autocommit mode. + // > 2. Schema S of database connection D must be a WAL + // > mode database. + // > 3. There must not be a write transaction open on schema S + // > of database connection D. + // > 4. One or more transactions must have been written to the + // > current wal file since it was created on disk (by any + // > connection). This means that a snapshot cannot be taken + // > on a wal mode database with no wal file immediately + // > after it is first opened. At least one transaction must + // > be written to it first. + + // Test condition 1: if sqlite3_get_autocommit(db.sqliteConnection) != 0 { throw DatabaseError(resultCode: code, message: """ Can't create snapshot because database is in autocommit mode. """) } + + // Test condition 2: if let journalMode = try? String.fetchOne(db, sql: "PRAGMA journal_mode"), journalMode != "wal" { @@ -47,7 +63,14 Can't create snapshot because database is not in WAL mode. """) } - throw DatabaseError(resultCode: code) + + // Condition 3 can't happen because GRDB only calls this + // initializer from read transactions. + // + // Hence it is condition 4 that is false: + throw DatabaseError(resultCode: code, message: """ + Can't create snapshot from a missing or empty wal file. + """) } guard let sqliteSnapshot else { throw DatabaseError(resultCode: .SQLITE_INTERNAL) // WTF SQLite? @@ -65,17 +88,7 @@ final class WALSnapshot: Sendable { /// /// See . func compare(_ other: WALSnapshot) -> CInt { - return sqlite3_snapshot_cmp(sqliteSnapshot, other.sqliteSnapshot) - } -#else - static let available = false - - init(_ db: Database) throws { - throw DatabaseError(resultCode: .SQLITE_MISUSE, message: "snapshots are not available") + sqlite3_snapshot_cmp(sqliteSnapshot, other.sqliteSnapshot) } - - func compare(_ other: WALSnapshot) -> CInt { - preconditionFailure("snapshots are not available") - } -#endif } +#endif diff --git a/GRDB/Core/WALSnapshotTransaction.swift b/GRDB/Core/WALSnapshotTransaction.swift new file mode 100644 index 0000000000..5f56f1d2fc --- /dev/null +++ b/GRDB/Core/WALSnapshotTransaction.swift @@ -0,0 +1,94 @@ +// swiftlint:disable:next line_length +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) +/// A long-lived read-only WAL transaction. +/// +/// `WALSnapshotTransaction` **takes ownership** of its reader +/// `SerializedDatabase` (TODO: make it a move-only type eventually). +class WALSnapshotTransaction { + private let reader: SerializedDatabase + private let release: (_ isInsideTransaction: Bool) -> Void + + /// The state of the database at the beginning of the transaction.
+ let walSnapshot: WALSnapshot + + /// Creates a long-lived WAL transaction on a read-only connection. + /// + /// The `release` closure is always called. It is called when the + /// `WALSnapshotTransaction` is deallocated, or if the initializer + /// throws. + /// + /// In normal operations, the argument to `release` is always false, + /// meaning that the connection is no longer in a transaction. If true, + /// the connection has been left inside a transaction, due to + /// some error. + /// + /// Usage: + /// + /// ```swift + /// let transaction = WALSnapshotTransaction( + /// reader: reader, + /// release: { isInsideTransaction in + /// ... + /// }) + /// ``` + /// + /// - parameter reader: A read-only database connection. + /// - parameter release: A closure to call when the read-only connection + /// is no longer used. + init( + onReader reader: SerializedDatabase, + release: @escaping (_ isInsideTransaction: Bool) -> Void) + throws + { + assert(reader.configuration.readonly) + + do { + // Open a long-lived transaction, and enter snapshot isolation + self.walSnapshot = try reader.sync(allowingLongLivedTransaction: true) { db in + try db.beginTransaction(.deferred) + // This also acquires snapshot isolation because checking + // database schema performs a read access. + try db.clearSchemaCacheIfNeeded() + return try WALSnapshot(db) + } + self.reader = reader + self.release = release + } catch { + // self is not initialized, so deinit will not run. + Self.commitAndRelease(reader: reader, release: release) + throw error + } + } + + deinit { + Self.commitAndRelease(reader: reader, release: release) + } + + /// Executes database operations in the snapshot transaction, and + /// returns their result after they have finished executing. + func read<T>(_ value: (Database) throws -> T) rethrows -> T { + // We should check the validity of the snapshot, as DatabaseSnapshotPool does. + try reader.sync(value) + } + + /// Schedules database operations for execution, and + /// returns immediately. + func asyncRead(_ value: @escaping (Database) -> Void) { + // We should check the validity of the snapshot, as DatabaseSnapshotPool does. + reader.async(value) + } + + private static func commitAndRelease( + reader: SerializedDatabase, + release: (_ isInsideTransaction: Bool) -> Void) + { + // WALSnapshotTransaction may be deinitialized in the dispatch + // queue of its reader: allow reentrancy. + let isInsideTransaction = reader.reentrantSync(allowingLongLivedTransaction: false) { db in + try? db.commit() + return db.isInsideTransaction + } + release(isInsideTransaction) + } +} +#endif diff --git a/GRDB/Documentation.docc/Concurrency.md b/GRDB/Documentation.docc/Concurrency.md index 24a69c248a..4c5f7e908a 100644 --- a/GRDB/Documentation.docc/Concurrency.md +++ b/GRDB/Documentation.docc/Concurrency.md @@ -14,7 +14,7 @@ The other chapters cover, with more details, the fundamentals of SQLite concurre **The two concurrency rules are strongly recommended practices.** They are all about SQLite, a robust and reliable database that takes great care of your data: don't miss an opportunity to put it on your side! -### Rule 1: Connect to any database file only once +#### Rule 1: Connect to any database file only once Open one single ``DatabaseQueue`` or ``DatabasePool`` per database file, for the whole duration of your use of the database. Not for the duration of _each_ database access, but really for the duration of _all_ database accesses to this file.
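A sketch of Rule 1: one connection, opened once and shared for the whole lifetime of the process (`AppDatabase` and the file location are illustrative, not part of GRDB):

```swift
import Foundation
import GRDB

final class AppDatabase {
    // One DatabaseQueue per database file, for the whole app.
    static let shared = AppDatabase()
    let dbQueue: DatabaseQueue

    private init() {
        let url = URL(fileURLWithPath: NSTemporaryDirectory())
            .appendingPathComponent("db.sqlite")
        dbQueue = try! DatabaseQueue(path: url.path)
    }
}
```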
@@ -27,7 +27,7 @@ Open one single ``DatabaseQueue`` or ``DatabasePool`` per database file, for the - You will not be able to use the <doc:DatabaseObservation> features. - You will see SQLite errors ([`SQLITE_BUSY`]). -### Rule 2: Mind your transactions +#### Rule 2: Mind your transactions Database operations that are grouped in a transaction are guaranteed to be either fully saved on disk, or not at all. Read-only transactions guarantee a stable and immutable view of the database, and do not see changes performed by eventual concurrent writes. @@ -150,31 +150,31 @@ This prevents the database operations from various concurrent accesses from bein **You will generally use the safe database access methods `read` and `write`.** In this context, "safe" means that a database access is concurrency-friendly, because GRDB provides the following guarantees: -### Serialized Writes +#### Serialized Writes **All writes performed by one ``DatabaseQueue`` or ``DatabasePool`` instance are serialized.** This guarantee prevents [`SQLITE_BUSY`] errors during concurrent writes. -### Write Transactions +#### Write Transactions **All writes are wrapped in a transaction.** Concurrent reads can not see partial database updates (even reads performed by other processes). -### Isolated Reads +#### Isolated Reads **All reads are wrapped in a transaction.** An isolated read sees a stable and immutable state of the database, and does not see changes performed by eventual concurrent writes (even writes performed by other processes). See [Isolation In SQLite](https://www.sqlite.org/isolation.html) for more information. -### Forbidden Writes +#### Forbidden Writes **Inside a read access, all attempts to write raise an error.** This enforces the immutability of the database during a read. -### Non-Reentrancy +#### Non-Reentrancy **Database access methods are not reentrant.** diff --git a/GRDB/Documentation.docc/DatabaseConnections.md b/GRDB/Documentation.docc/DatabaseConnections.md index d530d8c562..8e4bbbde8a 100644 --- a/GRDB/Documentation.docc/DatabaseConnections.md +++ b/GRDB/Documentation.docc/DatabaseConnections.md @@ -22,6 +22,73 @@ The differences are: **If you are not sure, choose `DatabaseQueue`.** You will always be able to switch to `DatabasePool` later. +## Opening a Connection + +You need a path to a database file in order to open a database connection. + +**When the SQLite file is ready-made, and you do not intend to modify its content**, then add the database file as a [resource of your Xcode project or Swift package](https://developer.apple.com/documentation/xcode), and open a read-only database connection: + +```swift +// HOW TO open a read-only connection to a database resource + +// Get the path to the database resource. +// Replace `Bundle.main` with `Bundle.module` when you write a Swift Package. +let dbPath = Bundle.main.path(forResource: "db", ofType: "sqlite") + +if let dbPath { + // If the resource exists, open a read-only connection. + // Writes are disallowed because resources can not be modified. + var config = Configuration() + config.readonly = true + let dbQueue = try DatabaseQueue(path: dbPath, configuration: config) +} else { + // The database resource can not be found. + // Fix your setup, or report the problem to the user. +} +``` + +**If the application creates or writes in the database**, then first choose a proper location for the database file. Document-based applications will let the user pick a location. Apps that use the database as a global storage will prefer the Application Support directory.
+
+> Tip: Regardless of the database location, it is recommended that you wrap the database file inside a dedicated directory. This directory will bundle the main database file and its related [SQLite temporary files](https://www.sqlite.org/tempfiles.html) together.
+>
+> The dedicated directory makes it easy to move or delete the whole database when needed: just move or delete the directory.
+>
+> On iOS, the directory can be encrypted with [data protection](https://developer.apple.com/documentation/uikit/protecting_the_user_s_privacy/encrypting_your_app_s_files), in order to help secure all database files in one shot. When a database is protected, an application that runs in the background on a locked device won't be able to read or write from it. Instead, it will catch ``DatabaseError`` with code [`SQLITE_IOERR`](https://www.sqlite.org/rescode.html#ioerr) (10) "disk I/O error", or [`SQLITE_AUTH`](https://www.sqlite.org/rescode.html#auth) (23) "not authorized".
+
+The sample code below creates or opens a database file inside its dedicated directory. On the first run, a new empty database file is created. On subsequent runs, the directory and database file already exist, so it just opens a connection:
+
+```swift
+// HOW TO create an empty database, or open an existing database file
+
+// Create the "Application Support/MyDatabase" directory if needed
+let fileManager = FileManager.default
+let appSupportURL = try fileManager.url(
+    for: .applicationSupportDirectory, in: .userDomainMask,
+    appropriateFor: nil, create: true)
+let directoryURL = appSupportURL.appendingPathComponent("MyDatabase", isDirectory: true)
+try fileManager.createDirectory(at: directoryURL, withIntermediateDirectories: true)
+
+// Open or create the database
+let databaseURL = directoryURL.appendingPathComponent("db.sqlite")
+let dbQueue = try DatabaseQueue(path: databaseURL.path)
+```
+
+## Closing Connections
+
+Database connections are automatically closed when ``DatabaseQueue`` or ``DatabasePool`` instances are deinitialized.
+
+If the correct execution of your program depends on precise database closing, perform an explicit call to ``DatabaseReader/close()``. This method may fail and create zombie connections, so please check its detailed documentation. A short sketch of explicit closing appears below.
+
+
+## Next Steps
+
+Once connected to the database, your next steps are probably:
+
+- Define the structure of newly created databases: see <doc:Migrations>.
+- If you intend to write SQL, see <doc:SQLSupport>. Otherwise, see <doc:QueryInterface>.
+
+Even if you plan to keep your project mundane and simple, take the time to read the <doc:Concurrency> guide eventually.
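+
+As announced in Closing Connections above, here is a minimal sketch of explicit closing (the error handling is illustrative):
+
+```swift
+do {
+    try dbQueue.close()
+} catch {
+    // The connection was not closed cleanly: see the documentation
+    // of `close()` for the consequences (zombie connections).
+    print("Database close failed: \(error)")
+}
+```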
+
 ## Topics
 
 ### Configuring database connections
diff --git a/GRDB/Documentation.docc/DatabaseObservation.md b/GRDB/Documentation.docc/DatabaseObservation.md
index e2ce7e5a23..9dc34dad5b 100644
--- a/GRDB/Documentation.docc/DatabaseObservation.md
+++ b/GRDB/Documentation.docc/DatabaseObservation.md
@@ -20,14 +20,20 @@ GRDB puts this SQLite feature to some good use, and lets you observe the databas
 
 - ``ValueObservation``
 - ``SharedValueObservation``
 - ``AsyncValueObservation``
+- ``Database/registerAccess(to:)``
 
 ### Observing Database Transactions
 
 - ``DatabaseRegionObservation``
+- ``Database/afterNextTransaction(onCommit:onRollback:)``
 
 ### Low-Level Transaction Observers
 
 - ``TransactionObserver``
+- ``Database/add(transactionObserver:extent:)``
+- ``Database/remove(transactionObserver:)``
+- ``DatabaseWriter/add(transactionObserver:extent:)``
+- ``DatabaseWriter/remove(transactionObserver:)``
 - ``Database/TransactionObservationExtent``
 
 ### Database Regions
diff --git a/GRDB/Documentation.docc/DatabaseSchema.md b/GRDB/Documentation.docc/DatabaseSchema.md
index 19296473a9..abfd53021b 100644
--- a/GRDB/Documentation.docc/DatabaseSchema.md
+++ b/GRDB/Documentation.docc/DatabaseSchema.md
@@ -1,5 +1,7 @@
 # The Database Schema
 
+Define or query the database schema.
+
 ## Overview
 
 **GRDB supports all database schemas, and has no requirement.** Any existing SQLite database can be opened, and you are free to structure your new databases as you wish.
@@ -16,7 +18,7 @@ try db.create(table: "player") { t in
 
 When you plan to evolve the schema as new versions of your application ship, wrap all schema changes in <doc:Migrations>.
 
-Prefer Swift methods over raw SQL queries. They allow the compiler to check if a schema change is available on the target operating system. Only use a raw SQL query when no Swift method exist (when creating views or triggers, for example).
+Prefer Swift methods over raw SQL queries. They allow the compiler to check if a schema change is available on the target operating system. Only use a raw SQL query when no Swift method exists (when creating triggers, for example).
 
 When a schema change is not directly supported by SQLite, or not available on the target operating system, database tables have to be recreated. See <doc:Migrations> for the detailed procedure.
 
@@ -24,7 +26,9 @@ When a schema change is not directly supported by SQLite, or not available on th
 
 Even though all schemas are supported, some features of the library and of the Swift language are easier to use when the schema follows a few conventions described below.
 
-When those conventions are not applied, or not applicable, you will have to perform extra configurations.
+When those conventions are not applied, or not applicable, you will have to perform extra configurations.
+
+For recommendations specific to JSON columns, see <doc:JSON>.
 ### Table names should be English, singular, and camelCased
 
@@ -135,34 +139,18 @@ try db.create(table: "team") { t in
 try db.create(table: "membership") { t in
     // Composite primary key
     t.primaryKey {
-        t.column("playerId", .integer).references("player")
-        t.column("teamId", .text).references("team")
+        t.belongsTo("player")
+        t.belongsTo("team")
     }
     t.column("role", .text).notNull()
 }
-
-// REQUIRES EXTRA CONFIGURATION
-try db.create(table: "player") { t in
-    t.column("name", .text).notNull()
-}
-
-try db.create(table: "team") { t in
-    t.column("id", .text).notNull().unique()
-    t.column("name", .text).notNull()
-}
-
-try db.create(table: "membership") { t in
-    t.column("playerId", .integer).notNull()
-    t.column("teamId", .text).notNull()
-    t.column("role", .text).notNull()
-}
 ```
 
 Primary keys support record fetching methods such as ``FetchableRecord/fetchOne(_:id:)``, and persistence methods such as ``MutablePersistableRecord/update(_:onConflict:)`` or ``MutablePersistableRecord/delete(_:)``.
 
 See <doc:SingleRowTables> when you need to define a table that contains a single row.
 
-☝️ **If the database table does not define any explicit primary key**, the record type for this table needs explicit support for the [hidden `rowid` column](https://www.sqlite.org/rowidtable.html):
+☝️ **If the database table does not define any explicit primary key**, identifying specific rows in this table needs explicit support for the [hidden `rowid` column](https://www.sqlite.org/rowidtable.html) in the matching record types:
 
 ```swift
 // A table without any explicit primary key
@@ -179,11 +167,6 @@ struct Player: Codable {
     var score: Int
 }
 
-extension Player: Identifiable {
-    // Required because the primary key column is not 'id'
-    var id: Int64? { rowid }
-}
-
 extension Player: FetchableRecord, MutablePersistableRecord {
     // Required because the primary key
     // is the hidden rowid column.
@@ -257,16 +240,16 @@ Unique indexes make sure SQLite prevents the insertion of conflicting rows:
 
 ```swift
 // RECOMMENDED
 try db.create(table: "player") { t in
     t.autoIncrementedPrimaryKey("id")
+    t.belongsTo("team").notNull()
+    t.column("position", .integer).notNull()
     // Players must have distinct names
     t.column("name", .text).unique()
-    t.column("teamId", .integer).notNull().references("team")
-    t.column("position", .integer).notNull()
 }
 
 // One single player at any given position in a team
 try db.create(
-    index: "playerTeamPosition",
-    on: "player", columns: ["teamId", "position"],
+    indexOn: "player",
+    columns: ["teamId", "position"],
     options: .unique)
 ```
 
@@ -284,10 +267,7 @@ try db.create(
 > try db.create(table: "team") { t in
 >     t.column("name", .text)
 > }
-> try db.create(
->     index: "teamName",
->     on: "team", columns: ["name"],
->     options: .unique)
+> try db.create(indexOn: "team", columns: ["name"], options: .unique)
 > ```
 >
 > If you want to turn an undroppable constraint into a droppable index, you'll need to recreate the database table. See <doc:Migrations> for the detailed procedure.
 
@@ -326,7 +306,7 @@ try db.create(table: "player") { t in
     t.autoIncrementedPrimaryKey("id")
     t.column("name", .text).notNull()
     // A player must refer to an existing team
-    t.column("teamId", .integer).notNull().references("team")
+    t.belongsTo("team").notNull()
 }
 
 // REQUIRES EXTRA CONFIGURATION
@@ -338,6 +318,8 @@ try db.create(table: "player") { t in
 }
 ```
 
+See ``TableDefinition/belongsTo(_:inTable:onDelete:onUpdate:deferred:indexed:)`` for more information about the creation of foreign keys.
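+
+For example, here is a hedged sketch that uses one of those options (the `.cascade` action is illustrative; adapt it to your schema):
+
+```swift
+// Deleting a team deletes its players, thanks to the
+// cascade foreign key action.
+try db.create(table: "player") { t in
+    t.autoIncrementedPrimaryKey("id")
+    t.column("name", .text).notNull()
+    t.belongsTo("team", onDelete: .cascade).notNull()
+}
+```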
+ GRDB [Associations](https://github.com/groue/GRDB.swift/blob/master/Documentation/AssociationsBasics.md) are automatically configured from foreign keys declared in the database schema: ```swift @@ -381,6 +363,7 @@ extension Team: TableRecord { - ``Database/create(virtualTable:ifNotExists:using:_:)`` - ``Database/drop(table:)`` - ``Database/dropFTS4SynchronizationTriggers(forTable:)`` +- ``Database/dropFTS5SynchronizationTriggers(forTable:)`` - ``Database/rename(table:to:)`` - ``Database/ColumnType`` - ``Database/ConflictResolution`` @@ -390,25 +373,35 @@ extension Team: TableRecord { - ``TableOptions`` - ``VirtualTableModule`` +### Database Views + +- ``Database/create(view:options:columns:as:)`` +- ``Database/create(view:options:columns:asLiteral:)`` +- ``Database/drop(view:)`` +- ``ViewOptions`` + ### Database Indexes +- ``Database/create(indexOn:columns:options:condition:)`` - ``Database/create(index:on:columns:options:condition:)`` +- ``Database/create(index:on:expressions:options:condition:)`` +- ``Database/drop(indexOn:columns:)`` - ``Database/drop(index:)`` - ``IndexOptions`` ### Querying the Database Schema -- ``Database/columns(in:)`` -- ``Database/foreignKeys(on:)`` -- ``Database/indexes(on:)`` +- ``Database/columns(in:in:)`` +- ``Database/foreignKeys(on:in:)`` +- ``Database/indexes(on:in:)`` - ``Database/isGRDBInternalTable(_:)`` - ``Database/isSQLiteInternalTable(_:)`` -- ``Database/primaryKey(_:)`` +- ``Database/primaryKey(_:in:)`` - ``Database/schemaVersion()`` - ``Database/table(_:hasUniqueKey:)`` -- ``Database/tableExists(_:)`` -- ``Database/triggerExists(_:)`` -- ``Database/viewExists(_:)`` +- ``Database/tableExists(_:in:)`` +- ``Database/triggerExists(_:in:)`` +- ``Database/viewExists(_:in:)`` - ``ColumnInfo`` - ``ForeignKeyInfo`` - ``IndexInfo`` @@ -417,9 +410,9 @@ extension Team: TableRecord { ### Integrity Checks - ``Database/checkForeignKeys()`` -- ``Database/checkForeignKeys(in:)`` +- ``Database/checkForeignKeys(in:in:)`` - ``Database/foreignKeyViolations()`` -- ``Database/foreignKeyViolations(in:)`` +- ``Database/foreignKeyViolations(in:in:)`` - ``ForeignKeyViolation`` ### Sunsetted Methods diff --git a/GRDB/Documentation.docc/DatabaseSharing.md b/GRDB/Documentation.docc/DatabaseSharing.md index 49f63f1f2e..50b9569f4c 100644 --- a/GRDB/Documentation.docc/DatabaseSharing.md +++ b/GRDB/Documentation.docc/DatabaseSharing.md @@ -1,6 +1,6 @@ # Sharing a Database -How to share an SQLite Database between several processes. +How to share an SQLite database between multiple processes • Recommendations for App Group containers, App Extensions, App Sandbox, and file coordination. ## Overview @@ -21,11 +21,13 @@ We'll address all of those challenges below. > > Always consider sharing plain files, or any other inter-process communication technique, before sharing an SQLite database. -## Use a Database Pool +## Use the WAL mode -In order to access a shared database, use a ``DatabasePool``. It opens the database in the [WAL mode], which helps sharing a database. +In order to access a shared database, use a ``DatabasePool``. It opens the database in the [WAL mode], which helps sharing a database because it allows multiple processes to access the database concurrently. -Since several processes may open the database at the same time, protect the creation of the database pool with an [NSFileCoordinator]. +It is also possible to use a ``DatabaseQueue``, with the `.wal` ``Configuration/journalMode``. 
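+
+For example, here is a minimal sketch (assuming `dbPath` points to the shared database file):
+
+```swift
+// A DatabaseQueue that opens the shared database in the WAL mode.
+var config = Configuration()
+config.journalMode = .wal
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+```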
+
+Since several processes may open the database at the same time, protect the creation of the database connection with an [NSFileCoordinator].
 
 - In a process that can create and write in the database, use this sample code:
 
@@ -150,11 +152,14 @@ If several processes want to write in the database, configure the database pool
 
 ```swift
 var configuration = Configuration()
+configuration.defaultTransactionKind = .immediate
 configuration.busyMode = .timeout(/* a TimeInterval */)
 let dbPool = try DatabasePool(path: ..., configuration: configuration)
 ```
 
-With such a setup, you may still get `SQLITE_BUSY` errors from all write operations. They will occur if the database remains locked by another process for longer than the specified timeout. You can catch those errors:
+Both the `defaultTransactionKind` and `busyMode` are important for preventing `SQLITE_BUSY`. The `immediate` transaction kind prevents write transactions from overlapping, and the busy timeout has write transactions wait, instead of throwing `SQLITE_BUSY`, whenever another process is writing.
+
+With such a setup, you will still get `SQLITE_BUSY` errors if the database remains locked by another process for longer than the specified timeout. You can catch those errors:
 
 ```swift
 do {
@@ -226,9 +231,7 @@ In applications that use the background modes supported by iOS, post `resumeNoti
 
 features are not able to detect database changes performed by other processes.
 
-Whenever you need to notify other processes that the database has been changed, you will have to use a cross-process notification mechanism such as [NSFileCoordinator] or [CFNotificationCenterGetDarwinNotifyCenter].
-
-You can trigger those notifications automatically with ``DatabaseRegionObservation``:
+Whenever you need to notify other processes that the database has been changed, you will have to use a cross-process notification mechanism such as [NSFileCoordinator] or [CFNotificationCenterGetDarwinNotifyCenter]. You can trigger those notifications automatically with ``DatabaseRegionObservation``:
 
 ```swift
 // Notify all changes made to the database
@@ -244,6 +247,8 @@ let observer = try observation.start(in: dbPool) { db in
 }
 ```
 
+The processes that observe the database can catch those notifications, and deal with the notified changes. See the "Dealing with Undetected Changes" sections of ``ValueObservation`` and ``DatabaseRegionObservation`` for some related techniques.
+
 [NSFileCoordinator]: https://developer.apple.com/documentation/foundation/nsfilecoordinator
 [CFNotificationCenterGetDarwinNotifyCenter]: https://developer.apple.com/documentation/corefoundation/1542572-cfnotificationcentergetdarwinnot
 [WAL mode]: https://www.sqlite.org/wal.html
diff --git a/GRDB/Documentation.docc/Documentation.md b/GRDB/Documentation.docc/Documentation.md
deleted file mode 100644
index eead88397c..0000000000
--- a/GRDB/Documentation.docc/Documentation.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# ``GRDB``
-
-A toolkit for SQLite databases, with a focus on application development
-
-## Overview
-
-Use GRDB to save your application’s permanent data into SQLite databases.
-
-The library provides raw access to SQL and advanced SQLite features, because one sometimes enjoys a sharp tool.
-
-It has robust concurrency primitives, so that multi-threaded applications can efficiently use their databases.
-
-It grants your application models with persistence and fetching methods, so that you don't have to deal with SQL and raw database rows when you don't want to.
-
-Compared to [SQLite.swift](https://github.com/stephencelis/SQLite.swift) or [FMDB](https://github.com/ccgus/fmdb), GRDB can spare you a lot of glue code. Compared to [Core Data](https://developer.apple.com/library/content/documentation/Cocoa/Conceptual/CoreData/) or [Realm](http://realm.io), it can simplify your multi-threaded applications.
-
-## Topics
-
-### Fundamentals
-
-- 
-- 
-- 
-- 
-
-### Migrations and The Database Schema
-
-- 
-- 
-- 
-
-### Records and the Query Interface
-
-- 
-
-### Responding to Database Changes
-
-- 
-
-### Full-Text Search
-
-- 
-
-### Combine Publishers
-
-- ``DatabasePublishers``
diff --git a/GRDB/Documentation.docc/Extension/Configuration.md b/GRDB/Documentation.docc/Extension/Configuration.md
index 3954ae64a6..efb77b8a62 100644
--- a/GRDB/Documentation.docc/Extension/Configuration.md
+++ b/GRDB/Documentation.docc/Extension/Configuration.md
@@ -83,7 +83,9 @@ do {
 - ``acceptsDoubleQuotedStringLiterals``
 - ``busyMode``
 - ``foreignKeysEnabled``
+- ``journalMode``
 - ``readonly``
+- ``JournalModeConfiguration``
 
 ### Configuring GRDB Connections
 
@@ -92,8 +94,11 @@ do {
 - ``label``
 - ``maximumReaderCount``
 - ``observesSuspensionNotifications``
+- ``persistentReadOnlyConnections``
 - ``prepareDatabase(_:)``
 - ``publicStatementArguments``
+- ``transactionClock``
+- ``TransactionClock``
 
 ### Configuring the Quality of Service
diff --git a/GRDB/Documentation.docc/Extension/DatabasePool.md b/GRDB/Documentation.docc/Extension/DatabasePool.md
index 43c64f85e6..d23388ee14 100644
--- a/GRDB/Documentation.docc/Extension/DatabasePool.md
+++ b/GRDB/Documentation.docc/Extension/DatabasePool.md
@@ -78,8 +78,7 @@ A `DatabasePool` needs your application to follow rules in order to deliver its
 
 ### Accessing the Database
 
-See ``DatabaseReader`` and ``DatabaseWriter`` for more database
-access methods.
+See ``DatabaseReader`` and ``DatabaseWriter`` for more database access methods.
 
 - ``asyncConcurrentRead(_:)``
 - ``writeInTransaction(_:_:)``
diff --git a/GRDB/Documentation.docc/Extension/DatabaseQueue.md b/GRDB/Documentation.docc/Extension/DatabaseQueue.md
index 856bcc6550..f8e004a7ad 100644
--- a/GRDB/Documentation.docc/Extension/DatabaseQueue.md
+++ b/GRDB/Documentation.docc/Extension/DatabaseQueue.md
@@ -88,11 +88,12 @@ A `DatabaseQueue` needs your application to follow rules in order to deliver its
 
 - ``init(named:configuration:)``
 - ``init(path:configuration:)``
+- ``inMemoryCopy(fromPath:configuration:)``
+- ``temporaryCopy(fromPath:configuration:)``
 
 ### Accessing the Database
 
-See ``DatabaseReader`` and ``DatabaseWriter`` for more database
-access methods.
+See ``DatabaseReader`` and ``DatabaseWriter`` for more database access methods.
 
 - ``inDatabase(_:)``
 - ``inTransaction(_:_:)``
diff --git a/GRDB/Documentation.docc/Extension/DatabaseRegionObservation.md b/GRDB/Documentation.docc/Extension/DatabaseRegionObservation.md
new file mode 100644
index 0000000000..9bfee067f8
--- /dev/null
+++ b/GRDB/Documentation.docc/Extension/DatabaseRegionObservation.md
@@ -0,0 +1,111 @@
+# ``GRDB/DatabaseRegionObservation``
+
+`DatabaseRegionObservation` tracks changes in a database region, and notifies impactful transactions.
+
+## Overview
+
+`DatabaseRegionObservation` tracks insertions, updates, and deletions that impact the tracked region, whether performed with raw SQL, or <doc:QueryInterface>. This includes indirect changes triggered by [foreign keys actions](https://www.sqlite.org/foreignkeys.html#fk_actions) or [SQL triggers](https://www.sqlite.org/lang_createtrigger.html).
+
+See below for the list of exceptions.
+
+`DatabaseRegionObservation` calls your application right after changes have been committed in the database, and before any other thread had any opportunity to perform further changes. *This is a pretty strong guarantee that most applications do not really need.* Instead, most applications prefer to be notified with fresh values: make sure you check ``ValueObservation`` before using `DatabaseRegionObservation`.
+
+## DatabaseRegionObservation Usage
+
+Create a `DatabaseRegionObservation` with one or several requests to track:
+
+```swift
+// Tracks the full player table
+let observation = DatabaseRegionObservation(tracking: Player.all())
+```
+
+Then start the observation from a ``DatabaseQueue`` or ``DatabasePool``:
+
+```swift
+let cancellable = try observation.start(in: dbQueue) { error in
+    // Handle error
+} onChange: { (db: Database) in
+    print("Players were changed")
+}
+```
+
+Enjoy the change notifications:
+
+```swift
+try dbQueue.write { db in
+    try Player(name: "Arthur").insert(db)
+}
+// Prints "Players were changed"
+```
+
+You stop the observation by calling the ``DatabaseCancellable/cancel()`` method on the object returned by the `start` method. Cancellation is automatic when the cancellable is deallocated:
+
+```swift
+cancellable.cancel()
+```
+
+`DatabaseRegionObservation` can also be turned into a Combine publisher, or an RxSwift observable (see the companion library [RxGRDB](https://github.com/RxSwiftCommunity/RxGRDB)):
+
+```swift
+let cancellable = observation.publisher(in: dbQueue).sink { completion in
+    // Handle completion
+} receiveValue: { (db: Database) in
+    print("Players were changed")
+}
+```
+
+You can feed `DatabaseRegionObservation` with any type that conforms to the ``DatabaseRegionConvertible`` protocol: ``FetchRequest``, ``DatabaseRegion``, ``Table``, etc. For example:
+
+```swift
+// Observe the score column of the 'player' table
+let observation = DatabaseRegionObservation(
+    tracking: Player.select(Column("score")))
+
+// Observe the 'score' column of the 'player' table
+let observation = DatabaseRegionObservation(
+    tracking: SQLRequest<Row>(sql: "SELECT score FROM player"))
+
+// Observe both the 'player' and 'team' tables
+let observation = DatabaseRegionObservation(
+    tracking: Table("player"), Table("team"))
+
+// Observe the full database
+let observation = DatabaseRegionObservation(
+    tracking: .fullDatabase)
+```
+
+## Dealing with Undetected Changes
+
+`DatabaseRegionObservation` will not notify impactful transactions whenever the database is modified in an undetectable way:
+
+- Changes performed by external database connections.
+- Changes performed by SQLite statements that are not compiled and executed by GRDB.
+- Changes to the database schema, changes to internal system tables such as `sqlite_master`.
+- Changes to [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables.
+
+To have observations notify such undetected changes, applications can take explicit action: call the ``Database/notifyChanges(in:)`` `Database` method from a write transaction:
+
+```swift
+try dbQueue.write { db in
+    // Notify observations that some changes were performed in the database
+    try db.notifyChanges(in: .fullDatabase)
+
+    // Notify observations that some changes were performed in the player table
+    try db.notifyChanges(in: Player.all())
+
+    // Equivalent alternative
+    try db.notifyChanges(in: Table("player"))
+}
+```
+
+## Topics
+
+### Creating DatabaseRegionObservation
+
+- ``init(tracking:)-5ldbe``
+- ``init(tracking:)-2nqjd``
+
+### Observing Database Transactions
+
+- ``publisher(in:)``
+- ``start(in:onError:onChange:)``
diff --git a/GRDB/Documentation.docc/Extension/DatabaseValueConvertible.md b/GRDB/Documentation.docc/Extension/DatabaseValueConvertible.md
new file mode 100644
index 0000000000..97a52239a7
--- /dev/null
+++ b/GRDB/Documentation.docc/Extension/DatabaseValueConvertible.md
@@ -0,0 +1,183 @@
+# ``GRDB/DatabaseValueConvertible``
+
+A type that can convert itself into and out of a database value.
+
+## Overview
+
+A `DatabaseValueConvertible` type supports conversion to and from database values (null, integers, doubles, strings, and blobs). `DatabaseValueConvertible` is adopted by `Bool`, `Int`, `String`, `Date`, etc.
+
+> Note: Types that convert to and from multiple columns in a database row must not conform to the `DatabaseValueConvertible` protocol. Those types are called **record types**, and should conform to record protocols instead. See <doc:QueryInterface>.
+
+> Note: Standard collections `Array`, `Set`, and `Dictionary` do not conform to `DatabaseValueConvertible`. To store arrays, sets, or dictionaries in individual database values, wrap them as properties of `Codable` record types. They will automatically be stored as JSON objects and arrays. See <doc:JSON>.
+
+## Conforming to the DatabaseValueConvertible Protocol
+
+To conform to `DatabaseValueConvertible`, implement the two requirements ``fromDatabaseValue(_:)-21zzv`` and ``databaseValue-1ob9k``. Do not customize the ``fromMissingColumn()-7iamp`` requirement. If your type `MyValue` conforms, then the conformance of the optional type `MyValue?` is automatic.
+
+The implementation of `fromDatabaseValue` must return nil if the type can not be decoded from the raw database value. This nil value will have GRDB throw a decoding error accordingly.
+
+For example:
+
+```swift
+struct EvenInteger {
+    let value: Int // Guaranteed even
+
+    init?(_ value: Int) {
+        guard value.isMultiple(of: 2) else {
+            return nil // Not an even number
+        }
+        self.value = value
+    }
+}
+
+extension EvenInteger: DatabaseValueConvertible {
+    var databaseValue: DatabaseValue {
+        value.databaseValue
+    }
+
+    static func fromDatabaseValue(_ dbValue: DatabaseValue) -> Self? {
+        guard let value = Int.fromDatabaseValue(dbValue) else {
+            return nil // Not an integer
+        }
+        return EvenInteger(value) // Nil if not even
+    }
+}
+```
+
+### Built-in RawRepresentable support
+
+`DatabaseValueConvertible` implementation is ready-made for `RawRepresentable` types whose raw value is itself `DatabaseValueConvertible`, such as enums:
+
+```swift
+enum Grape: String {
+    case chardonnay, merlot, riesling
+}
+
+// Encodes and decodes `Grape` as a string in the database:
+extension Grape: DatabaseValueConvertible { }
+```
+
+### Built-in Codable support
+
+`DatabaseValueConvertible` is also ready-made for `Codable` types, which are automatically coded and decoded from JSON arrays and objects:
+
+```swift
+struct Color: Codable {
+    var red: Double
+    var green: Double
+    var blue: Double
+}
+
+// Encodes and decodes `Color` as a JSON object in the database:
+extension Color: DatabaseValueConvertible { }
+```
+
+By default, such codable value types are encoded and decoded with the standard [JSONEncoder](https://developer.apple.com/documentation/foundation/jsonencoder) and [JSONDecoder](https://developer.apple.com/documentation/foundation/jsondecoder). `Data` values are handled with the `.base64` strategy, `Date` with the `.millisecondsSince1970` strategy, and non-conforming floats with the `.throw` strategy.
+
+To customize the JSON format, provide an explicit implementation for the `DatabaseValueConvertible` requirements, or implement these two methods:
+
+```swift
+protocol DatabaseValueConvertible {
+    static func databaseJSONDecoder() -> JSONDecoder
+    static func databaseJSONEncoder() -> JSONEncoder
+}
+```
+
+### Adding support for the Tagged library
+
+[Tagged](https://github.com/pointfreeco/swift-tagged) is a popular library that makes it possible to enhance the type-safety of our programs with dedicated wrappers around basic types. For example:
+
+```swift
+import Tagged
+
+struct Player: Identifiable {
+    // Thanks to Tagged, Player.ID can not be mismatched with Team.ID or
+    // Award.ID, even though they all wrap strings.
+    typealias ID = Tagged<Player, String>
+    var id: ID
+    var name: String
+    var score: Int
+}
+```
+
+Applications that use both Tagged and GRDB will want to add those lines somewhere:
+
+```swift
+import GRDB
+import Tagged
+
+// Add database support to Tagged values
+extension Tagged: SQLExpressible where RawValue: SQLExpressible { }
+extension Tagged: StatementBinding where RawValue: StatementBinding { }
+extension Tagged: StatementColumnConvertible where RawValue: StatementColumnConvertible { }
+extension Tagged: DatabaseValueConvertible where RawValue: DatabaseValueConvertible { }
+```
+
+This makes it possible to use `Tagged` values in all the expected places:
+
+```swift
+let id: Player.ID = ...
+let player = try Player.find(db, id: id)
+```
+
+## Optimized Values
+
+For extra performance, custom value types can conform to both `DatabaseValueConvertible` and ``StatementColumnConvertible``. This extra protocol grants raw access to the [low-level C SQLite interface](https://www.sqlite.org/c3ref/column_blob.html) when decoding values.
+ +For example: + +```swift +extension EvenInteger: StatementColumnConvertible { + init?(sqliteStatement: SQLiteStatement, index: CInt) { + let int64 = sqlite3_column_int64(sqliteStatement, index) + guard let value = Int(exactly: int64) else { + return nil // Does not fit Int (probably a 32-bit architecture) + } + self.init(value) // Nil if not even + } +} +``` + +This extra conformance is not required: only aim at the low-level C interface if you have identified a performance issue after profiling your application! + +## Topics + +### Creating a Value + +- ``fromDatabaseValue(_:)-21zzv`` +- ``fromMissingColumn()-7iamp`` + +### Accessing the DatabaseValue + +- ``databaseValue-1ob9k`` + +### Configuring the JSON format for the standard Decodable protocol + +- ``databaseJSONDecoder()-7zou9`` +- ``databaseJSONEncoder()-37sff`` + +### Fetching Values from Raw SQL + +- ``fetchCursor(_:sql:arguments:adapter:)-6elcz`` +- ``fetchAll(_:sql:arguments:adapter:)-1cqyb`` +- ``fetchSet(_:sql:arguments:adapter:)-5jene`` +- ``fetchOne(_:sql:arguments:adapter:)-qvqp`` + +### Fetching Values from a Prepared Statement + +- ``fetchCursor(_:arguments:adapter:)-4l6af`` +- ``fetchAll(_:arguments:adapter:)-3abuc`` +- ``fetchSet(_:arguments:adapter:)-6y54n`` +- ``fetchOne(_:arguments:adapter:)-3d7ax`` + +### Fetching Values from a Request + +- ``fetchCursor(_:_:)-8q4r6`` +- ``fetchAll(_:_:)-9hkqs`` +- ``fetchSet(_:_:)-1foke`` +- ``fetchOne(_:_:)-o6yj`` + +### Supporting Types + +- ``DatabaseValueCursor`` +- ``StatementBinding`` diff --git a/GRDB/Documentation.docc/Extension/Statement.md b/GRDB/Documentation.docc/Extension/Statement.md new file mode 100644 index 0000000000..5d6172f056 --- /dev/null +++ b/GRDB/Documentation.docc/Extension/Statement.md @@ -0,0 +1,205 @@ +# ``GRDB/Statement`` + +A prepared statement. + +## Overview + +Prepared statements let you execute an SQL query several times, with different arguments if needed. + +Reusing prepared statements is a performance optimization technique because SQLite parses and analyses the SQL query only once, when the prepared statement is created. + +## Building Prepared Statements + +Build a prepared statement with the ``Database/makeStatement(sql:)`` method: + +```swift +try dbQueue.write { db in + let insertStatement = try db.makeStatement(sql: """ + INSERT INTO player (name, score) VALUES (:name, :score) + """) + + let selectStatement = try db.makeStatement(sql: """ + SELECT * FROM player WHERE name = ? + """) +} +``` + +The `?` and colon-prefixed keys like `:name` in the SQL query are the statement arguments. Set the values for those arguments with arrays or dictionaries of database values, or ``StatementArguments`` instances: + +```swift +insertStatement.arguments = ["name": "Arthur", "score": 1000] +selectStatement.arguments = ["Arthur"] +``` + +Alternatively, the ``Database/makeStatement(literal:)`` method creates prepared statements with support for [SQL Interpolation]: + +```swift +let insertStatement = try db.makeStatement(literal: "INSERT ...") +let selectStatement = try db.makeStatement(literal: "SELECT ...") +// ~~~~~~~ +``` + +The `makeStatement` methods throw an error of code `SQLITE_MISUSE` (21) if the SQL query contains multiple statements joined with a semicolon. See below. 
+
+## Executing Prepared Statements and Fetching Values
+
+Prepared statements can be executed:
+
+```swift
+try insertStatement.execute()
+```
+
+To fetch rows and values from a prepared statement, use a fetching method of ``Row``, ``DatabaseValueConvertible``, or ``FetchableRecord``:
+
+```swift
+let players = try Player.fetchCursor(selectStatement) // A Cursor of Player
+let players = try Player.fetchAll(selectStatement)    // [Player]
+let players = try Player.fetchSet(selectStatement)    // Set<Player>
+let player = try Player.fetchOne(selectStatement)     // Player?
+//              ~~~~~~ or Row, Int, String, Date, etc.
+```
+
+Arguments can be set at the moment of the statement execution:
+
+```swift
+try insertStatement.execute(arguments: ["name": "Arthur", "score": 1000])
+let player = try Player.fetchOne(selectStatement, arguments: ["Arthur"])
+```
+
+> Note: A prepared statement that has failed with an error can not be recovered. Create a new instance, or use a cached statement as described below.
+
+> Tip: When you are after the best performance, mind the difference between setting the arguments before execution, and setting the arguments at the moment of execution:
+>
+> ```swift
+> // First option
+> try statement.setArguments(...)
+> try statement.execute()
+>
+> // Second option
+> try statement.execute(arguments: ...)
+> ```
+>
+> Both perform exactly the same action, and most applications should not care about the difference. Yet:
+>
+> - ``setArguments(_:)`` performs a copy of string and blob arguments. It uses the low-level [`SQLITE_TRANSIENT`](https://www.sqlite.org/c3ref/c_static.html) option, and is a good fit for reusing a given statement with the same arguments.
> - ``execute(arguments:)`` avoids a temporary allocation for string and blob arguments if the number of arguments is small. Instead of `SQLITE_TRANSIENT`, it uses the low-level [`SQLITE_STATIC`](https://www.sqlite.org/c3ref/c_static.html) option. This is a good fit for reusing a given statement with various arguments.
+>
+> Don't make a blind choice, and monitor your app performance if it really matters!
+
+## Caching Prepared Statements
+
+When the same query will be used several times in the lifetime of an application, one may feel a natural desire to cache prepared statements.
+
+Don't cache statements yourself.
+
+> Note: This is because an application lacks the necessary tools. Statements are tied to specific SQLite connections and dispatch queues which are not managed by the application, especially with a ``DatabasePool`` connection. A change in the database schema [may, or may not](https://www.sqlite.org/compile.html#max_schema_retry) invalidate a statement.
+
+Instead, use the ``Database/cachedStatement(sql:)`` method. GRDB does all the hard caching and memory management:
+
+```swift
+let statement = try db.cachedStatement(sql: "INSERT ...")
+```
+
+The variant ``Database/cachedStatement(literal:)`` supports [SQL Interpolation]:
+
+```swift
+let statement = try db.cachedStatement(literal: "INSERT ...")
+```
+
+Should a cached prepared statement throw an error, don't reuse it. Instead, reload one from the cache.
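+
+For example, here is a minimal sketch of statement reuse through the cache (schema and values are illustrative):
+
+```swift
+try dbQueue.write { db in
+    for (name, score) in [("Arthur", 100), ("O'Brien", 1000)] {
+        // Returns the same prepared statement on each iteration.
+        let statement = try db.cachedStatement(sql: """
+            INSERT INTO player (name, score) VALUES (?, ?)
+            """)
+        try statement.execute(arguments: [name, score])
+    }
+}
+```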
+
+## Parsing Multiple Prepared Statements from a Single SQL String
+
+To build multiple statements joined with a semicolon, use ``Database/allStatements(sql:arguments:)``:
+
+```swift
+let statements = try db.allStatements(sql: """
+    INSERT INTO player (name, score) VALUES (?, ?);
+    INSERT INTO player (name, score) VALUES (?, ?);
+    """, arguments: ["Arthur", 100, "O'Brien", 1000])
+while let statement = try statements.next() {
+    try statement.execute()
+}
+```
+
+The variant ``Database/allStatements(literal:)`` supports [SQL Interpolation]:
+
+```swift
+let statements = try db.allStatements(literal: """
+    INSERT INTO player (name, score) VALUES (\("Arthur"), \(100));
+    INSERT INTO player (name, score) VALUES (\("O'Brien"), \(1000));
+    """)
+// An alternative way to iterate all statements
+try statements.forEach { statement in
+    try statement.execute()
+}
+```
+
+> Tip: When you intend to run all statements in an SQL string but don't care about individual ones, don't bother iterating individual statement instances! Skip this documentation section and just use ``Database/execute(sql:arguments:)``:
+>
+> ```swift
+> try db.execute(sql: """
+>     CREATE TABLE player ...;
+>     INSERT INTO player ...;
+>     """)
+> ```
+
+The results of multiple `SELECT` statements can be joined into a single ``Cursor``. This is the GRDB version of the [`sqlite3_exec()`](https://www.sqlite.org/c3ref/exec.html) function:
+
+```swift
+let statements = try db.allStatements(sql: """
+    SELECT ...;
+    SELECT ...;
+    """)
+let players = try statements.flatMap { statement in
+    try Player.fetchCursor(statement)
+}
+while let player = try players.next() {
+    print(player.name)
+}
+```
+
+The ``SQLStatementCursor`` returned from `allStatements` can be turned into a regular Swift array, but in this case make sure all individual statements can compile even if the previous ones were not executed:
+
+```swift
+// OK: Array of statements
+let statements = try Array(db.allStatements(sql: """
+   INSERT ...;
+   UPDATE ...;
+   """))
+
+// FAILURE: Can't build an array of statements since the INSERT won't
+// compile until CREATE TABLE is executed.
+let statements = try Array(db.allStatements(sql: """
+   CREATE TABLE player ...;
+   INSERT INTO player ...;
+   """))
+```
+
+## Topics
+
+### Executing a Prepared Statement
+
+- ``execute(arguments:)``
+
+### Arguments
+
+- ``arguments``
+- ``setArguments(_:)``
+- ``setUncheckedArguments(_:)``
+- ``validateArguments(_:)``
+- ``StatementArguments``
+
+### Statement Information
+
+- ``columnCount``
+- ``columnNames``
+- ``databaseRegion``
+- ``index(ofColumn:)``
+- ``isReadonly``
+- ``sql``
+- ``sqliteStatement``
+- ``SQLiteStatement``
+
+
+[SQL Interpolation]: https://github.com/groue/GRDB.swift/blob/master/Documentation/SQLInterpolation.md
diff --git a/GRDB/Documentation.docc/Extension/TransactionObserver.md b/GRDB/Documentation.docc/Extension/TransactionObserver.md
new file mode 100644
index 0000000000..9815e08520
--- /dev/null
+++ b/GRDB/Documentation.docc/Extension/TransactionObserver.md
@@ -0,0 +1,285 @@
+# ``GRDB/TransactionObserver``
+
+A type that tracks database changes and transactions performed in a database.
+
+## Overview
+
+`TransactionObserver` is the low-level protocol that supports all <doc:DatabaseObservation> features.
+
+A transaction observer is notified of individual changes (inserts, updates and deletes), before they are committed to disk, as well as transaction commits and rollbacks.
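+
+For example, here is a minimal sketch of a conforming type (the logging behavior is illustrative):
+
+```swift
+// Logs transaction outcomes; observes all changes.
+final class CommitLogger: TransactionObserver {
+    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool {
+        true // interested in all changes
+    }
+
+    func databaseDidChange(with event: DatabaseEvent) {
+        print("Changed: \(event.tableName)")
+    }
+
+    func databaseDidCommit(_ db: Database) {
+        print("Committed")
+    }
+
+    func databaseDidRollback(_ db: Database) {
+        print("Rolled back")
+    }
+}
+```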
+
+## Activate a Transaction Observer
+
+An observer starts receiving change notifications after it has been added to a database connection with the ``DatabaseWriter/add(transactionObserver:extent:)`` `DatabaseWriter` method, or the ``Database/add(transactionObserver:extent:)`` `Database` method:
+
+```swift
+let observer = MyObserver()
+dbQueue.add(transactionObserver: observer)
+```
+
+By default, the database holds weak references to its transaction observers: they are not retained, and stop getting notifications after they are deallocated. See the Observation Extent section below for more options.
+
+## Database Changes And Transactions
+
+Database changes are notified to the ``databaseDidChange(with:)`` callback. This includes indirect changes triggered by `ON DELETE` and `ON UPDATE` actions associated to [foreign keys](https://www.sqlite.org/foreignkeys.html#fk_actions), and [SQL triggers](https://www.sqlite.org/lang_createtrigger.html).
+
+Transaction completions are notified to the ``databaseWillCommit()-7mksu``, ``databaseDidCommit(_:)`` and ``databaseDidRollback(_:)`` callbacks.
+
+> Important: Some changes and transactions are not automatically notified. See below.
+
+Notified changes are not actually written to disk until the transaction commits, and the `databaseDidCommit` callback is called. On the other side, `databaseDidRollback` confirms their invalidation:
+
+```swift
+try dbQueue.write { db in
+    try db.execute(sql: "INSERT ...") // 1. didChange
+    try db.execute(sql: "UPDATE ...") // 2. didChange
+}                                     // 3. willCommit, 4. didCommit
+
+try dbQueue.inTransaction { db in
+    try db.execute(sql: "INSERT ...") // 1. didChange
+    try db.execute(sql: "UPDATE ...") // 2. didChange
+    return .rollback                  // 3. didRollback
+}
+
+try dbQueue.write { db in
+    try db.execute(sql: "INSERT ...") // 1. didChange
+    throw SomeError()
+}                                     // 2. didRollback
+```
+
+Database statements that are executed outside of any explicit transaction do not drop off the radar:
+
+```swift
+try dbQueue.writeWithoutTransaction { db in
+    try db.execute(sql: "INSERT ...") // 1. didChange, 2. willCommit, 3. didCommit
+    try db.execute(sql: "UPDATE ...") // 4. didChange, 5. willCommit, 6. didCommit
+}
+```
+
+Changes that are on hold because of a [savepoint](https://www.sqlite.org/lang_savepoint.html) are only notified after the savepoint has been released. This makes sure that notified events are only those that have an opportunity to be committed:
+
+```swift
+try dbQueue.inTransaction { db in
+    try db.execute(sql: "INSERT ...")            // 1. didChange
+
+    try db.execute(sql: "SAVEPOINT foo")
+    try db.execute(sql: "UPDATE ...")            // delayed
+    try db.execute(sql: "UPDATE ...")            // delayed
+    try db.execute(sql: "RELEASE SAVEPOINT foo") // 2. didChange, 3. didChange
+
+    try db.execute(sql: "SAVEPOINT bar")
+    try db.execute(sql: "UPDATE ...")            // not notified
+    try db.execute(sql: "ROLLBACK TO SAVEPOINT bar")
+    try db.execute(sql: "RELEASE SAVEPOINT bar")
+
+    return .commit                               // 4. willCommit, 5. didCommit
+}
+```
+
+Eventual errors thrown from `databaseWillCommit` are exposed to the application code:
+
+```swift
+do {
+    try dbQueue.inTransaction { db in
+        ...
+        return .commit // 1. willCommit (throws), 2. didRollback
+    }
+} catch {
+    // 3. The error thrown by the transaction observer.
+}
+```
+
+- Note: All callbacks are called in the writer dispatch queue, and serialized with all database updates.
+
+- Note: The `databaseDidChange` and `databaseWillCommit` callbacks must not access the observed writer database connection in any way.
This limitation does not apply to `databaseDidCommit` and `databaseDidRollback` which can use their database argument. + +## Filtering Database Events + +**Transaction observers can choose the database changes they are interested in.** + +The ``observes(eventsOfKind:)`` method filters events that are notified to ``databaseDidChange(with:)``. It is the most efficient and recommended change filtering technique, because it is only called once before a database query is executed, and can completely disable change tracking: + +```swift +// Calls `observes(eventsOfKind:)` once. +// Calls `databaseDidChange(with:)` for every updated row, or not at all. +try db.execute(sql: "UPDATE player SET score = score + 1") +``` + +The ``DatabaseEventKind`` argument of `observes(eventsOfKind:)` can distinguish insertions from deletions and updates, and is also able to tell the columns that are about to be changed. + +For example, an observer can focus on the changes that happen on the "player" database table only: + +```swift +class PlayerObserver: TransactionObserver { + func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { + // Only observe changes to the "player" table. + eventKind.tableName == "player" + } + + func databaseDidChange(with event: DatabaseEvent) { + // This method is only called for changes that happen to + // the "player" table. + } +} +``` + +When the `observes(eventsOfKind:)` method returns false for all event kinds, the observer is still notified of transactions. + +## Observation Extent + +**You can specify how long an observer is notified of database changes and transactions.** + +The `remove(transactionObserver:)` method explicitly stops notifications, at any time: + +```swift +// From a database queue or pool: +dbQueue.remove(transactionObserver: observer) + +// From a database connection: +dbQueue.inDatabase { db in + db.remove(transactionObserver: observer) +} +``` + +Alternatively, use the `extent` parameter of the `add(transactionObserver:extent:)` method: + +```swift +let observer = MyObserver() + +// On a database queue or pool: +dbQueue.add(transactionObserver: observer) // default extent +dbQueue.add(transactionObserver: observer, extent: .observerLifetime) +dbQueue.add(transactionObserver: observer, extent: .nextTransaction) +dbQueue.add(transactionObserver: observer, extent: .databaseLifetime) + +// On a database connection: +dbQueue.inDatabase { db in + db.add(transactionObserver: ...) +} +``` + +- The default extent is `.observerLifetime`: the database holds a weak reference to the observer, and the observation automatically ends when the observer is deallocated. Meanwhile, the observer is notified of all changes and transactions. + +- `.nextTransaction` activates the observer until the current or next transaction completes. The database keeps a strong reference to the observer until its `databaseDidCommit` or `databaseDidRollback` callback is called. Hereafter the observer won't get any further notification. + +- `.databaseLifetime` has the database retain and notify the observer until the database connection is closed. + +Finally, an observer can avoid processing database changes until the end of the current transaction. 
After ``stopObservingDatabaseChangesUntilNextTransaction()``, the `databaseDidChange` callback will not be called until the current transaction completes:
+
+```swift
+class PlayerObserver: TransactionObserver {
+    var playerTableWasModified = false
+
+    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool {
+        eventKind.tableName == "player"
+    }
+
+    func databaseDidChange(with event: DatabaseEvent) {
+        playerTableWasModified = true
+
+        // It is pointless to keep on tracking further changes:
+        stopObservingDatabaseChangesUntilNextTransaction()
+    }
+}
+```
+
+## Support for SQLite Pre-Update Hooks
+
+When SQLite is built with the `SQLITE_ENABLE_PREUPDATE_HOOK` option, `TransactionObserver` gets an extra callback which lets you observe individual column values in the rows modified by a transaction:
+
+```swift
+protocol TransactionObserver: AnyObject {
+    #if SQLITE_ENABLE_PREUPDATE_HOOK
+    /// Notifies before a database change (insert, update, or delete)
+    /// with change information (initial / final values for the row's
+    /// columns).
+    ///
+    /// The event is only valid for the duration of this method call. If you
+    /// need to keep it longer, store a copy: event.copy().
+    func databaseWillChange(with event: DatabasePreUpdateEvent)
+    #endif
+}
+```
+
+This extra API can be activated in two ways:
+
+1. Use the GRDB.swift CocoaPod with a custom compilation option, as below.
+
+    It uses the system SQLite, which is compiled with `SQLITE_ENABLE_PREUPDATE_HOOK` support, but only on iOS 11.0+ (we don't know the minimum version of macOS, tvOS, watchOS):
+
+    ```ruby
+    pod 'GRDB.swift'
+    platform :ios, '11.0' # or above
+
+    post_install do |installer|
+      installer.pods_project.targets.select { |target| target.name == "GRDB.swift" }.each do |target|
+        target.build_configurations.each do |config|
+          # Enable extra GRDB APIs
+          config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_PREUPDATE_HOOK"
+          # Enable extra SQLite APIs
+          config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = "$(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1"
+        end
+      end
+    end
+    ```
+
+    **Warning**: make sure you use the right platform version! You will get runtime errors on devices with a lower version.
+
+    **Note**: the `GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1` option in `GCC_PREPROCESSOR_DEFINITIONS` defines some C function prototypes that are lacking from the system `<sqlite3.h>` header. When Xcode eventually ships with an SDK that includes a complete header, you may get a compiler error about duplicate function definitions. When this happens, just remove this `GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1` option.
+
+2. Use a [custom SQLite build](http://github.com/groue/GRDB.swift/blob/master/Documentation/CustomSQLiteBuilds.md) and activate the `SQLITE_ENABLE_PREUPDATE_HOOK` compilation option.
+
+## Dealing with Undetected Changes
+
+The changes and transactions that are not automatically notified to transaction observers are:
+
+- Read-only transactions.
+- Changes and transactions performed by external database connections.
+- Changes performed by SQLite statements that are not both compiled and executed through GRDB APIs.
+- Changes to the database schema, changes to internal system tables such as `sqlite_master`.
+- Changes to [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables.
+- The deletion of duplicate rows triggered by [`ON CONFLICT REPLACE`](https://www.sqlite.org/lang_conflict.html) clauses (this last exception might change in a future release of SQLite).
+
+To notify undetected changes to transaction observers, perform an explicit call to the ``Database/notifyChanges(in:)`` `Database` method. The ``databaseDidChange()-7olv7`` callback will be called accordingly. For example:
+
+```swift
+try dbQueue.write { db in
+    // Notify observers that some changes were performed in the database
+    try db.notifyChanges(in: .fullDatabase)
+
+    // Notify observers that some changes were performed in the player table
+    try db.notifyChanges(in: Player.all())
+
+    // Equivalent alternative
+    try db.notifyChanges(in: Table("player"))
+}
+```
+
+To notify a change in the database schema, notify a change to the `sqlite_master` table:
+
+```swift
+try dbQueue.write { db in
+    // Notify all observers of the sqlite_master table
+    try db.notifyChanges(in: Table("sqlite_master"))
+}
+```
+
+## Topics
+
+### Filtering Database Changes
+
+- ``observes(eventsOfKind:)``
+- ``DatabaseEventKind``
+
+### Handling Database Changes
+
+- ``databaseDidChange()-7olv7``
+- ``databaseDidChange(with:)``
+- ``stopObservingDatabaseChangesUntilNextTransaction()``
+- ``DatabaseEvent``
+
+### Handling Transactions
+
+- ``databaseWillCommit()-7mksu``
+- ``databaseDidCommit(_:)``
+- ``databaseDidRollback(_:)``
diff --git a/GRDB/Documentation.docc/Extension/ValueObservation.md b/GRDB/Documentation.docc/Extension/ValueObservation.md
new file mode 100644
index 0000000000..2fbf4f9e1a
--- /dev/null
+++ b/GRDB/Documentation.docc/Extension/ValueObservation.md
@@ -0,0 +1,318 @@
+# ``GRDB/ValueObservation``
+
+`ValueObservation` tracks changes in the results of database requests, and notifies fresh values whenever the database changes.
+
+## Overview
+
+`ValueObservation` tracks insertions, updates, and deletions that impact the tracked value, whether performed with raw SQL, or <doc:QueryInterface>. This includes indirect changes triggered by [foreign keys actions](https://www.sqlite.org/foreignkeys.html#fk_actions) or [SQL triggers](https://www.sqlite.org/lang_createtrigger.html).
+
+See below for the list of exceptions.
+
+## ValueObservation Usage
+
+1. Make sure that a unique database connection, ``DatabaseQueue`` or ``DatabasePool``, is kept open during the whole duration of the observation.
+
+2. Create a `ValueObservation` with a closure that fetches the observed value:
+
+    ```swift
+    let observation = ValueObservation.tracking { db in
+        // Fetch and return the observed value
+    }
+
+    // For example, an observation of [Player], which tracks all players:
+    let observation = ValueObservation.tracking { db in
+        try Player.fetchAll(db)
+    }
+
+    // The same observation, using shorthand notation:
+    let observation = ValueObservation.tracking(Player.fetchAll)
+    ```
+
+    There is no limit on the values that can be observed. An observation can perform multiple requests, from multiple database tables, and use raw SQL. See ``tracking(_:)`` for some examples.
+
+3. Start the observation in order to be notified of changes:
+
+    ```swift
+    let cancellable = observation.start(in: dbQueue) { error in
+        // Handle error
+    } onChange: { (players: [Player]) in
+        print("Fresh players", players)
+    }
+    ```
+
+4. Stop the observation by calling the ``DatabaseCancellable/cancel()`` method on the object returned by the `start` method.
Cancellation is automatic when the cancellable is deallocated:
+
+    ```swift
+    cancellable.cancel()
+    ```
+
+`ValueObservation` can also be turned into an async sequence, a Combine publisher, or an RxSwift observable (see the companion library [RxGRDB](https://github.com/RxSwiftCommunity/RxGRDB)):
+
+- Async sequence:
+
+    ```swift
+    do {
+        for try await players in observation.values(in: dbQueue) {
+            print("Fresh players", players)
+        }
+    } catch {
+        // Handle error
+    }
+    ```
+
+- Combine Publisher:
+
+    ```swift
+    let cancellable = observation.publisher(in: dbQueue).sink { completion in
+        // Handle completion
+    } receiveValue: { (players: [Player]) in
+        print("Fresh players", players)
+    }
+    ```
+
+## ValueObservation Behavior
+
+`ValueObservation` notifies an initial value before the eventual changes.
+
+`ValueObservation` only notifies changes committed to disk.
+
+By default, `ValueObservation` notifies a fresh value whenever any component of its fetched value is modified (any fetched column, row, etc.). This can be configured: see the Specifying the Tracked Region section below.
+
+By default, `ValueObservation` notifies the initial value, as well as eventual changes and errors, on the main dispatch queue, asynchronously. This can be configured: see the ValueObservation Scheduling section below.
+
+By default, `ValueObservation` fetches a fresh value immediately after a change is committed in the database. In particular, modifying the database on the main thread triggers a fetch on the main thread as well. This behavior can be configured: see the ValueObservation Scheduling section below.
+
+`ValueObservation` may coalesce subsequent changes into a single notification.
+
+`ValueObservation` may notify consecutive identical values. You can filter out the undesired duplicates with the ``removeDuplicates()`` method.
+
+Starting an observation retains the database connection, until it is stopped. As long as the observation is active, the database connection won't be deallocated.
+
+The database observation stops when the cancellable returned by the `start` method is cancelled or deallocated, or if an error occurs.
+
+> Important: Take care that there are use cases that `ValueObservation` is unfit for.
+>
+> For example, an application may need to process absolutely all changes, and avoid any coalescing. An application may also need to process changes before any further modifications could be performed in the database file. In those cases, the application needs to track *individual transactions*, not values: use ``DatabaseRegionObservation``.
+>
+> If you need to process changes before they are committed to disk, use ``TransactionObserver``.
+
+## ValueObservation Scheduling
+
+By default, `ValueObservation` notifies the initial value, as well as eventual changes and errors, on the main dispatch queue, asynchronously:
+
+```swift
+// The default scheduling
+let cancellable = observation.start(in: dbQueue) { error in
+    // Called asynchronously on the main dispatch queue
+} onChange: { value in
+    // Called asynchronously on the main dispatch queue
+    print("Fresh value", value)
+}
+```
+
+You can change this behavior by adding a `scheduling` argument to the `start()` method.
+
+For example, the ``ValueObservationScheduler/immediate`` scheduler notifies all values on the main dispatch queue, and notifies the first one immediately when the observation starts.
+
+It is very useful in graphic applications, because you can configure views right away, without waiting for the initial value to be fetched eventually. You don't have to implement any empty or loading screen, or to prevent some undesired initial animation.
Take care that the user interface is not responsive during the fetch of the first value, so only use the `immediate` scheduling for very fast database requests!
+
+The `immediate` scheduling requires that the observation starts from the main dispatch queue (a fatal error is raised otherwise):
+
+```swift
+// Immediate scheduling notifies
+// the initial value right on subscription.
+let cancellable = observation
+    .start(in: dbQueue, scheduling: .immediate) { error in
+        // Called on the main dispatch queue
+    } onChange: { value in
+        // Called on the main dispatch queue
+        print("Fresh value", value)
+    }
+// <- Here "Fresh value" has already been printed.
+```
+
+The other built-in scheduler ``ValueObservationScheduler/async(onQueue:)`` asynchronously schedules values and errors on the dispatch queue of your choice. Make sure you provide a serial queue, because a concurrent one such as `DispatchQueue.global(qos: .default)` would mess with the ordering of fresh value notifications:
+
+```swift
+// Async scheduling notifies all values
+// on the specified dispatch queue.
+let myQueue: DispatchQueue
+let cancellable = observation
+    .start(in: dbQueue, scheduling: .async(onQueue: myQueue)) { error in
+        // Called asynchronously on myQueue
+    } onChange: { value in
+        // Called asynchronously on myQueue
+        print("Fresh value", value)
+    }
+```
+
+As described above, the `scheduling` argument controls the execution of the change and error callbacks. You also have some control on the execution of the database fetch:
+
+- With the `.immediate` scheduling, the initial fetch is always performed synchronously, on the main thread, when the observation starts, so that the initial value can be notified immediately.
+
+- With the default `.async` scheduling, the initial fetch is always performed asynchronously. It never blocks the main thread.
+
+- By default, fresh values are fetched immediately after the database was changed. In particular, modifying the database on the main thread triggers a fetch on the main thread as well.
+
+    To change this behavior, and guarantee that fresh values are never fetched from the main thread, you need a ``DatabasePool`` and an optimized observation created with the ``tracking(regions:fetch:)`` or ``trackingConstantRegion(_:)`` methods. Make sure you read the documentation of those methods, or you might write an observation that misses some database changes.
+
+    It is possible to use a ``DatabasePool`` in the application, and an in-memory ``DatabaseQueue`` in tests and Xcode previews, with the common protocol ``DatabaseWriter``.
+
+
+## ValueObservation Sharing
+
+Sharing a `ValueObservation` spares database resources. When a database change happens, a fresh value is fetched only once, and then notified to all clients of the shared observation.
+
+You build a shared observation with ``shared(in:scheduling:extent:)``:
+
+```swift
+// SharedValueObservation<[Player]>
+let sharedObservation = ValueObservation
+    .tracking { db in try Player.fetchAll(db) }
+    .shared(in: dbQueue)
+```
+
+`ValueObservation` and `SharedValueObservation` are nearly identical, but the latter has no operator such as `map`. As a replacement, you may for example use Combine apis:
+
+```swift
+let cancellable = try sharedObservation
+    .publisher() // Turn shared observation into a Combine Publisher
+    .map { ... } // The map operator from Combine
+    .sink(...)
+```
+
+
+## Specifying the Tracked Region
+
+While the standard ``tracking(_:)`` method lets you track changes to a fetched value, sometimes your use case requires more granular control over the observed database region.
+
+Consider a scenario where you'd like to fetch a specific player's row, but only when their `score` column changes. You can use ``tracking(region:_:fetch:)`` to do just that:
+
+```swift
+let observation = ValueObservation.tracking(
+    // Define the tracked database region
+    // (the score column of the player with id 1)
+    region: Player.select(Column("score")).filter(id: 1),
+    // Define what to fetch upon such change to the tracked region
+    // (the player with id 1)
+    fetch: { db in try Player.fetchOne(db, id: 1) }
+)
+```
+
+This ``tracking(region:_:fetch:)`` method lets you entirely separate the **observed region(s)** from the **fetched value** itself, for maximum flexibility. See ``DatabaseRegionConvertible`` for more information about the regions that can be tracked.
+
+## Dealing with Undetected Changes
+
+`ValueObservation` does not fetch and notify a fresh value when the database is modified in a way it cannot detect:
+
+- Changes performed by external database connections.
+- Changes performed by SQLite statements that are not compiled and executed by GRDB.
+- Changes to the database schema, and changes to internal system tables such as `sqlite_master`.
+- Changes to [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables.
+
+To have observations notify fresh values after such an undetected change is performed, applications can take explicit action. For example, cancel and restart observations. Alternatively, call the ``Database/notifyChanges(in:)`` method from a write transaction:
+
+```swift
+try dbQueue.write { db in
+    // Notify observations that some changes were performed in the database
+    try db.notifyChanges(in: .fullDatabase)
+
+    // Notify observations that some changes were performed in the player table
+    try db.notifyChanges(in: Player.all())
+
+    // Equivalent alternative
+    try db.notifyChanges(in: Table("player"))
+}
+```
+
+## ValueObservation Performance
+
+This section further describes runtime aspects of `ValueObservation`, and provides some optimization tips for demanding applications.
+
+**`ValueObservation` is triggered by database transactions that may modify the tracked value.**
+
+Precisely speaking, `ValueObservation` tracks changes in a ``DatabaseRegion``, not changes in values.
+
+For example, if you track the maximum score of players, all transactions that impact the `score` column of the `player` database table (any update, insertion, or deletion) trigger the observation, even if the maximum score itself is not changed.
+
+You can filter out undesired duplicate notifications with the ``removeDuplicates()`` method.
+
+**ValueObservation can create database contention.** In other words, active observations take a toll on the constrained database resources. When triggered by impactful transactions, observations fetch fresh values, and can delay read and write database accesses of other application components.
+
+When needed, you can help GRDB optimize observations and reduce database contention:
+
+> Tip: Stop observations when possible.
+>
+> For example, if a `UIViewController` needs to display database values, it can start the observation in `viewWillAppear`, and stop it in `viewWillDisappear`, as in the sketch below.
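+>
+> Here is a minimal sketch of this pattern, for illustration only. It assumes a `Player` record type, a `dbQueue` database connection, and a hypothetical `updateView(with:)` method:
+>
+> ```swift
+> class PlayerListViewController: UIViewController {
+>     private var cancellable: AnyDatabaseCancellable?
+>
+>     override func viewWillAppear(_ animated: Bool) {
+>         super.viewWillAppear(animated)
+>         // Start observing the database when the view appears.
+>         let observation = ValueObservation.tracking { db in
+>             try Player.fetchAll(db)
+>         }
+>         cancellable = observation.start(in: dbQueue) { error in
+>             // Handle error
+>         } onChange: { [weak self] (players: [Player]) in
+>             // updateView(with:) is a hypothetical UI refresh method.
+>             self?.updateView(with: players)
+>         }
+>     }
+>
+>     override func viewWillDisappear(_ animated: Bool) {
+>         super.viewWillDisappear(animated)
+>         // Stop the observation by releasing its cancellable.
+>         cancellable = nil
+>     }
+> }
+> ```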
+>
+> In a SwiftUI application, you can profit from the [GRDBQuery](https://github.com/groue/GRDBQuery) companion library, and its [`View.queryObservation(_:)`](https://swiftpackageindex.com/groue/grdbquery/documentation/grdbquery/queryobservation) method.
+
+> Tip: Share observations when possible.
+>
+> Each call to the `ValueObservation.start` method triggers independent value refreshes. When several components of your app are interested in the same value, consider sharing the observation with ``shared(in:scheduling:extent:)``.
+
+> Tip: When the observation processes some raw fetched values, use the ``map(_:)`` operator:
+>
+> ```swift
+> // Plain observation
+> let observation = ValueObservation.tracking { db -> MyValue in
+>     let players = try Player.fetchAll(db)
+>     return computeMyValue(players)
+> }
+>
+> // Optimized observation
+> let observation = ValueObservation
+>     .tracking { db in try Player.fetchAll(db) }
+>     .map { players in computeMyValue(players) }
+> ```
+>
+> The `map` operator performs its job without blocking database accesses, and without blocking the main thread.
+
+> Tip: When the observation tracks a constant database region, create an optimized observation with the ``tracking(regions:fetch:)`` or ``trackingConstantRegion(_:)`` methods. Make sure you read the documentation of those methods, or you might write an observation that misses some database changes.
+
+**Truncating WAL checkpoints impact ValueObservation.** Such checkpoints are performed with ``Database/checkpoint(_:on:)`` or [`PRAGMA wal_checkpoint`](https://www.sqlite.org/pragma.html#pragma_wal_checkpoint). When an observation is started on a ``DatabasePool``, from a database that has a missing or empty [wal file](https://www.sqlite.org/tempfiles.html#write_ahead_log_wal_files), the observation will always notify two values when it starts, even if the database content is not changed. This is a consequence of the impossibility of creating the [wal snapshot](https://www.sqlite.org/c3ref/snapshot_get.html) needed for detecting that no changes were performed during the observation startup. If your application performs truncating checkpoints, you will avoid this behavior if you recreate a non-empty wal file before starting observations. To do so, perform any kind of no-op transaction (such as creating and dropping a dummy table).
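+
+For example, here is one possible no-op transaction, assuming a ``DatabasePool`` named `dbPool` (the dummy table name is arbitrary):
+
+```swift
+// Recreate a non-empty wal file, so that the next observations
+// can detect that no changes happen during their startup.
+try dbPool.write { db in
+    try db.execute(sql: "CREATE TABLE grdb_wal_warmup(a)")
+    try db.execute(sql: "DROP TABLE grdb_wal_warmup")
+}
+```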
+ + +## Topics + +### Creating a ValueObservation + +- ``tracking(_:)`` +- ``trackingConstantRegion(_:)`` +- ``tracking(region:_:fetch:)`` +- ``tracking(regions:fetch:)`` + +### Creating a Shared Observation + +- ``shared(in:scheduling:extent:)`` +- ``SharedValueObservationExtent`` + +### Accessing Observed Values + +- ``publisher(in:scheduling:)`` +- ``start(in:scheduling:onError:onChange:)`` +- ``values(in:scheduling:bufferingPolicy:)`` +- ``DatabaseCancellable`` +- ``ValueObservationScheduler`` + +### Mapping Values + +- ``map(_:)`` + +### Filtering Values + +- ``removeDuplicates()`` +- ``removeDuplicates(by:)`` + +### Requiring Write Access + +- ``requiresWriteAccess`` + +### Debugging + +- ``handleEvents(willStart:willFetch:willTrackRegion:databaseDidChange:didReceiveValue:didFail:didCancel:)`` +- ``print(_:to:)`` + +### Support + +- ``ValueReducer`` diff --git a/GRDB/Documentation.docc/FullTextSearch.md b/GRDB/Documentation.docc/FullTextSearch.md index 6fcd4bc52b..f7ad9f1230 100644 --- a/GRDB/Documentation.docc/FullTextSearch.md +++ b/GRDB/Documentation.docc/FullTextSearch.md @@ -12,3 +12,4 @@ Please refer to the [Full-Text Search](https://github.com/groue/GRDB.swift/blob/ - ``FTS3`` - ``FTS4`` +- ``FTS5`` diff --git a/GRDB/Documentation.docc/GRDB.md b/GRDB/Documentation.docc/GRDB.md new file mode 100644 index 0000000000..d824121e96 --- /dev/null +++ b/GRDB/Documentation.docc/GRDB.md @@ -0,0 +1,103 @@ +# ``GRDB`` + +A toolkit for SQLite databases, with a focus on application development + +## + +![GRDB Logo](GRDBLogo.png) + +## Overview + +Use this library to save your application’s permanent data into SQLite databases. It comes with built-in tools that address common needs: + +- **SQL Generation** + + Enhance your application models with persistence and fetching methods, so that you don't have to deal with SQL and raw database rows when you don't want to. + +- **Database Observation** + + Get notifications when database values are modified. + +- **Robust Concurrency** + + Multi-threaded applications can efficiently use their databases, including WAL databases that support concurrent reads and writes. + +- **Migrations** + + Evolve the schema of your database as you ship new versions of your application. + +- **Leverage your SQLite skills** + + Not all developers need advanced SQLite features. But when you do, GRDB is as sharp as you want it to be. Come with your SQL and SQLite skills, or learn new ones as you go! + +## Usage + +Start using the database in four steps: + +```swift +import GRDB + +// 1. Open a database connection +let dbQueue = try DatabaseQueue(path: "/path/to/database.sqlite") + +// 2. Define the database schema +try dbQueue.write { db in + try db.create(table: "player") { t in + t.primaryKey("id", .text) + t.column("name", .text).notNull() + t.column("score", .integer).notNull() + } +} + +// 3. Define a record type +struct Player: Codable, FetchableRecord, PersistableRecord { + var id: String + var name: String + var score: Int +} + +// 4. 
Write and read in the database +try dbQueue.write { db in + try Player(id: "1", name: "Arthur", score: 100).insert(db) + try Player(id: "2", name: "Barbara", score: 1000).insert(db) +} + +let players: [Player] = try dbQueue.read { db in + try Player.fetchAll(db) +} +``` + +## Links and Companion Libraries + +- [GitHub Repository](http://github.com/groue/GRDB.swift) +- [Installation Instructions, encryption with SQLCipher, custom SQLite builds](https://github.com/groue/GRDB.swift#installation) +- [GRDBQuery](https://github.com/groue/GRDBQuery): the SwiftUI companion for GRDB. +- [GRDBSnapshotTesting](https://github.com/groue/GRDBSnapshotTesting): Test your database. + +## Topics + +### Fundamentals + +- +- +- +- + +### Migrations and The Database Schema + +- +- + +### Records and the Query Interface + +- +- +- +- + +### Application Tools + +- +- +- +- ``DatabasePublishers`` diff --git a/GRDB/Documentation.docc/JSON.md b/GRDB/Documentation.docc/JSON.md new file mode 100644 index 0000000000..34fdba1257 --- /dev/null +++ b/GRDB/Documentation.docc/JSON.md @@ -0,0 +1,150 @@ +# JSON Support + +Store and use JSON values in SQLite databases. + +## Overview + +SQLite and GRDB can store and fetch JSON values in database columns. Starting iOS 16+, macOS 10.15+, tvOS 17+, and watchOS 9+, JSON values can be manipulated at the database level. + +## Store and fetch JSON values + +### JSON columns in the database schema + +It is recommended to store JSON values in text columns. In the example below, we create a ``Database/ColumnType/jsonText`` column with ``Database/create(table:options:body:)``: + +```swift +try db.create(table: "player") { t in + t.primaryKey("id", .text) + t.column("name", .text).notNull() + t.column("address", .jsonText).notNull() // A JSON column +} +``` + +> Note: `.jsonText` and `.text` are equivalent, because both build a TEXT column in SQL. Yet the former better describes the intent of the column. +> +> Note: SQLite JSON functions and operators are [documented](https://www.sqlite.org/json1.html#interface_overview) to throw errors if any of their arguments are binary blobs. That's the reason why it is recommended to store JSON as text. + +> Tip: When an application performs queries on values embedded inside JSON columns, indexes can help performance: +> +> ```swift +> // CREATE INDEX "player_on_country" +> // ON "player"("address" ->> 'country') +> try db.create( +> index: "player_on_country", +> on: "player", +> expressions: [ +> JSONColumn("address")["country"], +> ]) +> +> // SELECT * FROM player +> // WHERE "address" ->> 'country' = 'DE' +> let germanPlayers = try Player +> .filter(JSONColumn("address")["country"] == "DE") +> .fetchAll(db) +> ``` + +### Strict and flexible JSON schemas + +[Codable Records](https://github.com/groue/GRDB.swift/blob/master/README.md#codable-records) handle both strict and flexible JSON schemas. + +**For strict schemas**, use `Codable` properties. They will be stored as JSON strings in the database: + +```swift +struct Address: Codable { + var street: String + var city: String + var country: String +} + +struct Player: Codable { + var id: String + var name: String + + // Stored as a JSON string + // {"street": "...", "city": "...", "country": "..."} + var address: Address +} + +extension Player: FetchableRecord, PersistableRecord { } +``` + +**For flexible schemas**, use `String` or `Data` properties. 
+ +In the specific case of `Data` properties, it is recommended to store them as text in the database, because SQLite JSON functions and operators are [documented](https://www.sqlite.org/json1.html#interface_overview) to throw errors if any of their arguments are binary blobs. This encoding is automatic with ``DatabaseDataEncodingStrategy/text``: + +```swift +// JSON String property +struct Player: Codable { + var id: String + var name: String + var address: String // JSON string +} + +extension Player: FetchableRecord, PersistableRecord { } + +// JSON Data property, saved as text in the database +struct Team: Codable { + var id: String + var color: String + var info: Data // JSON UTF8 data +} + +extension Team: FetchableRecord, PersistableRecord { + // Support SQLite JSON functions and operators + // by storing JSON data as database text: + static let databaseDataEncodingStrategy = DatabaseDataEncodingStrategy.text +} +``` + +## Manipulate JSON values at the database level + +[SQLite JSON functions and operators](https://www.sqlite.org/json1.html) are available starting iOS 16+, macOS 10.15+, tvOS 17+, and watchOS 9+. + +Functions such as `JSON`, `JSON_EXTRACT`, `JSON_PATCH` and others are available as static methods on `Database`: ``Database/json(_:)``, ``Database/jsonExtract(_:atPath:)``, ``Database/jsonPatch(_:with:)``, etc. + +See the full list below. + +## JSON table-valued functions + +The JSON table-valued functions `json_each` and `json_tree` are not supported. + +## Topics + +### JSON Values + +- ``SQLJSONExpressible`` +- ``JSONColumn`` + +### Access JSON subcomponents, and query JSON values, at the SQL level + +The `->` and `->>` SQL operators are available on the ``SQLJSONExpressible`` protocol. + +- ``Database/jsonArrayLength(_:)`` +- ``Database/jsonArrayLength(_:atPath:)`` +- ``Database/jsonExtract(_:atPath:)`` +- ``Database/jsonExtract(_:atPaths:)`` +- ``Database/jsonType(_:)`` +- ``Database/jsonType(_:atPath:)`` + +### Build new JSON values at the SQL level + +- ``Database/json(_:)`` +- ``Database/jsonArray(_:)-8xxe3`` +- ``Database/jsonArray(_:)-469db`` +- ``Database/jsonObject(_:)`` +- ``Database/jsonQuote(_:)`` +- ``Database/jsonGroupArray(_:filter:)`` +- ``Database/jsonGroupObject(key:value:filter:)`` + +### Modify JSON values at the SQL level + +- ``Database/jsonInsert(_:_:)`` +- ``Database/jsonPatch(_:with:)`` +- ``Database/jsonReplace(_:_:)`` +- ``Database/jsonRemove(_:atPath:)`` +- ``Database/jsonRemove(_:atPaths:)`` +- ``Database/jsonSet(_:_:)`` + +### Validate JSON values at the SQL level + +- ``Database/jsonIsValid(_:)`` diff --git a/GRDB/Documentation.docc/Migrations.md b/GRDB/Documentation.docc/Migrations.md index 3286237f28..ee7ed0f837 100644 --- a/GRDB/Documentation.docc/Migrations.md +++ b/GRDB/Documentation.docc/Migrations.md @@ -26,14 +26,11 @@ migrator.registerMigration("Create authors") { db in migrator.registerMigration("Add books and author.birthYear") { db in try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") - t.column("authorId", .integer) - .notNull() - .indexed() - .references("author", onDelete: .cascade) + t.belongsTo("author").notNull() t.column("title", .text).notNull() } - try db.alter(table: "author") { t + try db.alter(table: "author") { t in t.add(column: "birthYear", .integer) } } @@ -236,7 +233,7 @@ To prevent a migration from committing foreign key violations on disk, you can: } ``` -As in the above example, check for foreign key violations with the ``Database/checkForeignKeys()`` and 
``Database/checkForeignKeys(in:)`` methods. They throw a nicely detailed ``DatabaseError`` that contains a lot of debugging information: +As in the above example, check for foreign key violations with the ``Database/checkForeignKeys()`` and ``Database/checkForeignKeys(in:in:)`` methods. They throw a nicely detailed ``DatabaseError`` that contains a lot of debugging information: ```swift // SQLite error 19: FOREIGN KEY constraint violation - from book(authorId) to author(id), diff --git a/GRDB/Documentation.docc/QueryInterface.md b/GRDB/Documentation.docc/QueryInterface.md index 066478bf37..f09b20f473 100644 --- a/GRDB/Documentation.docc/QueryInterface.md +++ b/GRDB/Documentation.docc/QueryInterface.md @@ -2,6 +2,10 @@ Record types and the query interface build SQL queries for you. +## Overview + +For an overview, see [Records](https://github.com/groue/GRDB.swift/blob/master/README.md#records), and [The Query Interface](https://github.com/groue/GRDB.swift/blob/master/README.md#the-query-interface). + ## Topics ### Records @@ -13,16 +17,22 @@ Record types and the query interface build SQL queries for you. - ``PersistableRecord`` - ``TableRecord`` -### Associations +### Expressions -- ``Association`` +- ``Column`` +- ``JSONColumn`` +- ``SQLExpression`` -### Query Interface Requests +### Requests - ``CommonTableExpression`` - ``QueryInterfaceRequest`` - ``Table`` +### Associations + +- ``Association`` + ### Errors - ``RecordError`` @@ -30,9 +40,11 @@ Record types and the query interface build SQL queries for you. ### Supporting Types +- ``ColumnExpression`` - ``DerivableRequest`` - ``SQLExpressible`` +- ``SQLJSONExpressible`` - ``SQLSpecificExpressible`` +- ``SQLSubqueryable`` - ``SQLOrderingTerm`` - ``SQLSelectable`` -- ``SQLSubqueryable`` diff --git a/GRDB/Documentation.docc/RecordRecommendedPractices.md b/GRDB/Documentation.docc/RecordRecommendedPractices.md new file mode 100644 index 0000000000..405bb8735a --- /dev/null +++ b/GRDB/Documentation.docc/RecordRecommendedPractices.md @@ -0,0 +1,596 @@ +# Recommended Practices for Designing Record Types + +Leverage the best of record types and associations. + +## Overview + +GRDB sits right between low-level SQLite wrappers, and high-level ORMs like [Core Data], so you may face questions when designing the model layer of your application. + +This is the topic of this article. Examples will be illustrated with a simple library database made of books and their authors. + +## Trust SQLite More Than Yourself + +Let's put things in the right order. An SQLite database stored on a user's device is more important than the Swift code that accesses it. When a user installs a new version of an application, only the database stored on the user's device remains the same. But all the Swift code may have changed. + +This is why it is recommended to define a **robust database schema** even before playing with record types. + +This is important because SQLite is very robust, whereas we developers write bugs. The more responsibility we give to SQLite, the less code we have to write, and the fewer defects we will ship on our users' devices, affecting their precious data. 
+
+For example, if we were to define the migration that configures a database made of books and their authors, we could write:
+
+```swift
+var migrator = DatabaseMigrator()
+
+migrator.registerMigration("createLibrary") { db in
+    try db.create(table: "author") { t in          // (1)
+        t.autoIncrementedPrimaryKey("id")          // (2)
+        t.column("name", .text).notNull()          // (3)
+        t.column("countryCode", .text)             // (4)
+    }
+
+    try db.create(table: "book") { t in
+        t.autoIncrementedPrimaryKey("id")
+        t.column("title", .text).notNull()         // (5)
+        t.belongsTo("author", onDelete: .cascade)  // (6)
+            .notNull()                             // (7)
+    }
+}
+
+try migrator.migrate(dbQueue)
+```
+
+1. Our database tables follow the recommended conventions: table names are English, singular, and camelCased. They look like Swift identifiers: `author`, `book`, `postalAddress`, `httpRequest`.
+2. Each author has a unique id.
+3. An author must have a name.
+4. The country of an author is not always known.
+5. A book must have a title.
+6. The `book.authorId` column is used to link a book to the author it belongs to. This column is indexed in order to ease the selection of an author's books. A foreign key is defined from the `book.authorId` column to `author.id`, so that SQLite guarantees that no book refers to a missing author. The `onDelete: .cascade` option has SQLite automatically delete all of an author's books when that author is deleted. See [Foreign Key Actions](https://sqlite.org/foreignkeys.html#fk_actions) for more information.
+7. The `book.authorId` column is not null so that SQLite guarantees that all books have an author.
+
+Thanks to this database schema, the application will always process *consistent data*, no matter how wrong the Swift code can get. Even after a hard crash, all books will have an author, a non-nil title, etc.
+
+> Tip: **A local SQLite database is not a JSON payload loaded from a remote server.**
+>
+> The JSON format and content cannot be controlled, and an application must defend itself against wacky servers. But a local database is under your full control. It is trustable. A relational database such as SQLite guarantees the quality of users' data, as long as enough energy is put in the proper definition of the database schema.
+
+> Tip: **Plan early for future versions of your application**: use <doc:Migrations>.
+
+## Record Types
+
+### Persistable Record Types are Responsible for Their Tables
+
+**Define one record type per database table.** This record type will be responsible for writing in this table.
+
+**Let's start from regular structs** whose properties match the columns in their database table. They conform to the standard [`Codable`] protocol so that we don't have to write the methods that convert to and from raw database rows.
+
+```swift
+struct Author: Codable {
+    var id: Int64?
+    var name: String
+    var countryCode: String?
+}
+
+struct Book: Codable {
+    var id: Int64?
+    var authorId: Int64
+    var title: String
+}
+```
+
+**We add database powers to our types with record protocols.**
+
+The `author` and `book` tables have an auto-incremented id. We want inserted records to learn about their id after a successful insertion. That's why we have them conform to the ``MutablePersistableRecord`` protocol, and implement ``MutablePersistableRecord/didInsert(_:)-109jm``. Other kinds of record types would just use ``PersistableRecord``, and ignore `didInsert`.
+
+On the reading side, we use ``FetchableRecord``, the protocol that can decode database rows.
+
+This gives:
+
+```swift
+// Add Database access
+extension Author: FetchableRecord, MutablePersistableRecord {
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+
+extension Book: FetchableRecord, MutablePersistableRecord {
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+```
+
+That's it. The `Author` type can read and write in the `author` database table. `Book` as well, in `book`:
+
+```swift
+try dbQueue.write { db in
+    // Insert and set author's id
+    var author = Author(name: "Herman Melville", countryCode: "US")
+    try author.insert(db)
+
+    // Insert and set book's id
+    var book = Book(authorId: author.id!, title: "Moby-Dick")
+    try book.insert(db)
+}
+
+let books = try dbQueue.read { db in
+    try Book.fetchAll(db)
+}
+```
+
+> Tip: When a column of a database table can't be NULL, define a non-optional property in the record type. On the other side, when the database may contain NULL, define an optional property. Compare:
+>
+> ```swift
+> try db.create(table: "author") { t in
+>     t.autoIncrementedPrimaryKey("id")
+>     t.column("name", .text).notNull() // Can't be NULL
+>     t.column("countryCode", .text)    // Can be NULL
+> }
+>
+> struct Author: Codable {
+>     var id: Int64?
+>     var name: String         // Not optional
+>     var countryCode: String? // Optional
+> }
+> ```
+>
+> There are exceptions to this rule.
+>
+> For example, the `id` column is never NULL in the database. And yet, `Author` has an optional `id` property. That is because we want to create instances of `Author` before they could be inserted in the database, and be assigned an auto-incremented id. If the `id` property were not optional, the `Author` type could not profit from auto-incremented ids!
+>
+> Another exception to this rule is described in <doc:RecordTimestamps>, where the creation date of a record is never NULL in the database, but optional in the Swift type.
+
+> Tip: When the database table has a single-column primary key, have the record type adopt the standard [`Identifiable`] protocol. This allows GRDB to define extra methods based on record ids:
+>
+> ```swift
+> let authorID: Int64 = 42
+> let author: Author = try dbQueue.read { db in
+>     try Author.find(db, id: authorID)
+> }
+> ```
+>
+> Take care that **`Identifiable` is not a good fit for optional ids**. You will frequently meet optional ids for records with auto-incremented ids:
+>
+> ```swift
+> struct Player: Codable {
+>     var id: Int64? // Optional ids are not suitable for Identifiable
+>     var name: String
+>     var score: Int
+> }
+>
+> extension Player: FetchableRecord, MutablePersistableRecord {
+>     // Update auto-incremented id upon successful insertion
+>     mutating func didInsert(_ inserted: InsertionSuccess) {
+>         id = inserted.rowID
+>     }
+> }
+> ```
+>
+> For more details about auto-incremented ids and `Identifiable`, see [issue #1435](https://github.com/groue/GRDB.swift/issues/1435#issuecomment-1740857712).
+
+### Record Types Hide Intimate Database Details
+
+In the previous sample codes, the `Book` and `Author` structs have one property per database column, and their types are natively supported by SQLite (`String`, `Int`, etc.)
+
+But it happens that raw database column names, or raw column types, are not a very good fit for the application.
+
+When this happens, it's time to **distinguish the Swift and database representations**.
Record types are the dedicated place where raw database values can be transformed into Swift types that are well-suited for the rest of the application.
+
+Let's look at three examples.
+
+#### First Example: Enums
+
+Authors write books, and more specifically novels, poems, essays, or theatre plays. Let's add a `kind` column in the database. We decide that a book kind is represented as a string ("novel", "essay", etc.) in the database:
+
+```swift
+try db.create(table: "book") { t in
+    ...
+    t.column("kind", .text).notNull()
+}
+```
+
+In Swift, it is not a good practice to use `String` for the type of the `kind` property. We prefer an enum instead:
+
+```swift
+struct Book: Codable {
+    enum Kind: String, Codable {
+        case essay, novel, poetry, theater
+    }
+    var id: Int64?
+    var authorId: Int64
+    var title: String
+    var kind: Kind
+}
+```
+
+Thanks to its enum property, the `Book` record prevents invalid book kinds from being stored into the database.
+
+In order to use `Book.Kind` in database requests for books (see below), we add the ``DatabaseValueConvertible`` conformance to `Book.Kind`:
+
+```swift
+extension Book.Kind: DatabaseValueConvertible { }
+
+// Fetch all novels
+let novels = try dbQueue.read { db in
+    try Book.filter(Column("kind") == Book.Kind.novel).fetchAll(db)
+}
+```
+
+#### Second Example: GPS Coordinates
+
+GPS coordinates can be stored in two distinct `latitude` and `longitude` columns. But the standard way to deal with such coordinates is a single `CLLocationCoordinate2D` struct.
+
+When this happens, keep column properties private, and provide sensible accessors instead:
+
+```swift
+try db.create(table: "place") { t in
+    t.autoIncrementedPrimaryKey("id")
+    t.column("name", .text).notNull()
+    t.column("latitude", .double).notNull()
+    t.column("longitude", .double).notNull()
+}
+
+struct Place: Codable {
+    var id: Int64?
+    var name: String
+    private var latitude: CLLocationDegrees
+    private var longitude: CLLocationDegrees
+
+    var coordinate: CLLocationCoordinate2D {
+        get {
+            CLLocationCoordinate2D(
+                latitude: latitude,
+                longitude: longitude)
+        }
+        set {
+            latitude = newValue.latitude
+            longitude = newValue.longitude
+        }
+    }
+}
+```
+
+Generally speaking, private properties make it possible to hide raw columns from the rest of the application. The next example shows another application of this technique.
+
+#### Third Example: Money Amounts
+
+Before storing money amounts in an SQLite database, take care that [floating-point numbers are never a good fit](https://stackoverflow.com/questions/3730019/why-not-use-double-or-float-to-represent-currency).
+
+SQLite only supports two kinds of numbers: integers and doubles, so we'll store amounts as integers. $12.00 will be represented by 1200, a quantity of cents. This allows SQLite to compute exact sums of prices, for example.
+
+On the other side, an amount of cents is not very practical for the rest of the Swift application. The [`Decimal`] type looks like a better fit.
+
+That's why the `Product` record type has a `price: Decimal` property, backed by a `priceCents` integer column:
+
+```swift
+try db.create(table: "product") { t in
+    t.autoIncrementedPrimaryKey("id")
+    t.column("name", .text).notNull()
+    t.column("priceCents", .integer).notNull()
+}
+
+struct Product: Codable {
+    var id: Int64?
+    var name: String
+    private var priceCents: Int
+
+    var price: Decimal {
+        get {
+            Decimal(priceCents) / 100
+        }
+        set {
+            priceCents = Self.cents(for: newValue)
+        }
+    }
+
+    private static func cents(for value: Decimal) -> Int {
+        Int(Double(truncating: NSDecimalNumber(decimal: value * 100)))
+    }
+}
+```
+
+## Record Requests
+
+Once we have record types that are able to read and write in the database, we'd like to perform database requests of such records.
+
+### Columns
+
+Requests that filter or sort records are expressed with **columns**, defined in a dedicated enumeration. When the record type conforms to [`Codable`], columns can be derived from the `CodingKeys` enum:
+
+```swift
+// HOW TO define columns for a Codable record
+extension Author {
+    enum Columns {
+        static let id = Column(CodingKeys.id)
+        static let name = Column(CodingKeys.name)
+        static let countryCode = Column(CodingKeys.countryCode)
+    }
+}
+```
+
+For other record types, declare a plain `String` enum that conforms to the ``ColumnExpression`` protocol:
+
+```swift
+// HOW TO define columns for a non-Codable record
+extension Author {
+    enum Columns: String, ColumnExpression {
+        case id, name, countryCode
+    }
+}
+```
+
+From those columns it is possible to define requests of type ``QueryInterfaceRequest``:
+
+```swift
+try dbQueue.read { db in
+    // Fetch all authors, ordered by name,
+    // in a localized case-insensitive fashion
+    let sortedAuthors: [Author] = try Author.all()
+        .order(Author.Columns.name.collating(.localizedCaseInsensitiveCompare))
+        .fetchAll(db)
+
+    // Count French authors
+    let frenchAuthorCount: Int = try Author.all()
+        .filter(Author.Columns.countryCode == "FR")
+        .fetchCount(db)
+}
+```
+
+### Turn Commonly-Used Requests into Methods
+
+An application can define reusable request methods that extend the built-in GRDB APIs. Those methods avoid code repetition, ease refactoring, and foster testability.
+
+Define those methods in extensions of the ``DerivableRequest`` protocol, as below:
+
+```swift
+// Author requests
+extension DerivableRequest<Author> {
+    /// Order authors by name, in a localized case-insensitive fashion
+    func orderByName() -> Self {
+        let name = Author.Columns.name
+        return order(name.collating(.localizedCaseInsensitiveCompare))
+    }
+
+    /// Filters authors from a country
+    func filter(countryCode: String) -> Self {
+        filter(Author.Columns.countryCode == countryCode)
+    }
+}
+
+// Book requests
+extension DerivableRequest<Book> {
+    /// Order books by title, in a localized case-insensitive fashion
+    func orderByTitle() -> Self {
+        let title = Book.Columns.title
+        return order(title.collating(.localizedCaseInsensitiveCompare))
+    }
+
+    /// Filters books by kind
+    func filter(kind: Book.Kind) -> Self {
+        filter(Book.Columns.kind == kind)
+    }
+}
+```
+
+Those methods define a fluent and legible API that encapsulates intimate database details:
+
+```swift
+try dbQueue.read { db in
+    let sortedSpanishAuthors: [Author] = try Author.all()
+        .filter(countryCode: "ES")
+        .orderByName()
+        .fetchAll(db)
+
+    let novelCount: Int = try Book.all()
+        .filter(kind: .novel)
+        .fetchCount(db)
+}
+```
+
+Extensions to the `DerivableRequest` protocol cannot change the type of requests. They remain requests of the base record.
To define requests of another type, use an extension to ``QueryInterfaceRequest``, as in the example below:
+
+```swift
+extension QueryInterfaceRequest<Author> {
+    // Selects author ids
+    func selectId() -> QueryInterfaceRequest<Int64> {
+        selectPrimaryKey(as: Int64.self)
+    }
+}
+
+// The ids of Japanese authors
+let ids: Set<Int64> = try Author.all()
+    .filter(countryCode: "JP")
+    .selectId()
+    .fetchSet(db)
+```
+
+## Associations
+
+[Associations] help navigate from authors to their books and vice versa. Because the `book` table has an `authorId` column, we say that each book **belongs to** its author, and each author **has many** books:
+
+```swift
+extension Book {
+    static let author = belongsTo(Author.self)
+}
+
+extension Author {
+    static let books = hasMany(Book.self)
+}
+```
+
+With associations, you can fetch a book's author, or an author's books:
+
+```swift
+// Fetch all novels from an author
+try dbQueue.read { db in
+    let author: Author = ...
+    let novels: [Book] = try author.request(for: Author.books)
+        .filter(kind: .novel)
+        .orderByTitle()
+        .fetchAll(db)
+}
+```
+
+Associations also make it possible to define more convenience request methods:
+
+```swift
+extension DerivableRequest<Book> {
+    /// Filters books from a country
+    func filter(authorCountryCode countryCode: String) -> Self {
+        // Books do not have any country column. But their author has one!
+        // Return books that can be joined to an author from this country:
+        joining(required: Book.author.filter(countryCode: countryCode))
+    }
+}
+
+// Fetch all Italian novels
+try dbQueue.read { db in
+    let italianNovels: [Book] = try Book.all()
+        .filter(kind: .novel)
+        .filter(authorCountryCode: "IT")
+        .fetchAll(db)
+}
+```
+
+With associations, you can also process graphs of authors and books, as described in the next section.
+
+### How to Model Graphs of Objects
+
+Since the beginning of this article, `Book` and `Author` are independent structs that don't know each other. The only "meeting point" is the `Book.authorId` property.
+
+Record types don't know each other on purpose: one does not need to know the author of a book when it's time to update the title of a book, for example.
+
+When an application wants to process authors and books together, it defines dedicated types that model the desired view on the graph of related objects.
For example:
+
+```swift
+// Fetch all authors along with their number of books
+struct AuthorInfo: Decodable, FetchableRecord {
+    var author: Author
+    var bookCount: Int
+}
+let authorInfos: [AuthorInfo] = try dbQueue.read { db in
+    try Author
+        .annotated(with: Author.books.count)
+        .asRequest(of: AuthorInfo.self)
+        .fetchAll(db)
+}
+```
+
+```swift
+// Fetch the literary careers of German authors, sorted by name
+struct LiteraryCareer: Codable, FetchableRecord {
+    var author: Author
+    var books: [Book]
+}
+let careers: [LiteraryCareer] = try dbQueue.read { db in
+    try Author
+        .filter(countryCode: "DE")
+        .orderByName()
+        .including(all: Author.books)
+        .asRequest(of: LiteraryCareer.self)
+        .fetchAll(db)
+}
+```
+
+```swift
+// Fetch all Colombian books and their authors
+struct Authorship: Decodable, FetchableRecord {
+    var book: Book
+    var author: Author
+}
+let authorships: [Authorship] = try dbQueue.read { db in
+    try Book.all()
+        .including(required: Book.author.filter(countryCode: "CO"))
+        .asRequest(of: Authorship.self)
+        .fetchAll(db)
+
+    // Equivalent alternative
+    try Book.all()
+        .filter(authorCountryCode: "CO")
+        .including(required: Book.author)
+        .asRequest(of: Authorship.self)
+        .fetchAll(db)
+}
+```
+
+In the above sample codes, requests that fetch values from several tables are decoded into additional record types: `AuthorInfo`, `LiteraryCareer`, and `Authorship`.
+
+Those record types conform to both [`Decodable`] and ``FetchableRecord``, so that they can feed from database rows. They do not provide any persistence methods, though. **All database writes are performed from persistable record instances** (of type `Author` or `Book`).
+
+For more information about associations, see the [Associations] guide.
+
+### Lazy and Eager Loading: Comparison with Other Database Libraries
+
+The additional record types described in the previous section may look superfluous. Some other database libraries are able to navigate in graphs of records without additional types.
+
+For example, [Core Data] and Ruby's [Active Record] use **lazy loading**. This means that relationships are lazily fetched on demand:
+
+```ruby
+# Lazy loading with Active Record
+author = Author.first       # Fetch first author
+puts author.name
+author.books.each do |book| # Lazily fetch books on demand
+  puts book.title
+end
+```
+
+**GRDB does not perform lazy loading.** In a GUI application, lazy loading cannot be achieved without record management (as in [Core Data]), which in turn comes with non-trivial pain points for developers regarding concurrency. Instead of lazy loading, the library provides the tooling needed to fetch data, even complex graphs, in an [isolated] fashion, so that fetched values accurately represent the database content, and all database invariants are preserved. See the <doc:Concurrency> guide for more information.
+
+Vapor [Fluent] uses **eager loading**, which means that relationships are only fetched if explicitly requested:
+
+```swift
+// Eager loading with Fluent
+let query = Author.query(on: db)
+    .with(\.$books) // <- Explicit request for books
+    .first()
+
+// Fetch first author and its books in one stroke
+if let author = query.get() {
+    print(author.name)
+    for book in author.books { print(book.title) }
+}
+```
+
+One must take care of fetching relationships, though, or Fluent raises a fatal error:
+
+```swift
+// Oops, the books relation is not explicitly requested
+let query = Author.query(on: db).first()
+if let author = query.get() {
+    // fatal error: Children relation not eager loaded.
+    for book in author.books { print(book.title) }
+}
+```
+
+**GRDB supports eager loading**. The difference with Fluent is that the relationships are modelled in a dedicated record type that provides runtime safety:
+
+```swift
+// Eager loading with GRDB
+struct LiteraryCareer: Codable, FetchableRecord {
+    var author: Author
+    var books: [Book]
+}
+
+let request = Author.all()
+    .including(all: Author.books) // <- Explicit request for books
+    .asRequest(of: LiteraryCareer.self)
+
+// Fetch first author and its books in one stroke
+if let career = try request.fetchOne(db) {
+    print(career.author.name)
+    for book in career.books { print(book.title) }
+}
+```
+
+[Active Record]: http://guides.rubyonrails.org/active_record_basics.html
+[`Codable`]: https://developer.apple.com/documentation/swift/Codable
+[Core Data]: https://developer.apple.com/documentation/coredata
+[`Decimal`]: https://developer.apple.com/documentation/foundation/decimal
+[`Decodable`]: https://developer.apple.com/documentation/swift/Decodable
+[Django]: https://docs.djangoproject.com/en/4.2/topics/db/
+[Fluent]: https://docs.vapor.codes/fluent/overview/
+[`Identifiable`]: https://developer.apple.com/documentation/swift/identifiable
+[isolated]: https://en.wikipedia.org/wiki/Isolation_(database_systems)
+[Associations]: https://github.com/groue/GRDB.swift/blob/master/Documentation/AssociationsBasics.md
diff --git a/GRDB/Documentation.docc/RecordTimestamps.md b/GRDB/Documentation.docc/RecordTimestamps.md
new file mode 100644
index 0000000000..44fb2e5e8c
--- /dev/null
+++ b/GRDB/Documentation.docc/RecordTimestamps.md
@@ -0,0 +1,440 @@
+# Record Timestamps and Transaction Date
+
+Learn how applications can save creation and modification dates of records.
+
+## Overview
+
+Some applications want to record creation and modification dates of database records. This article provides some advice and sample code that you can adapt for your specific needs.
+
+> Note: Creation and modification dates can be automatically handled by [SQLite triggers](https://www.sqlite.org/lang_createtrigger.html). We'll explore a different technique, though.
+>
+> This is not an advice against triggers, and you won't feel hindered in any way if you prefer to use triggers. Still, consider:
+>
+> - A trigger suffers no exception: it also fires when the application eventually wants to fine-tune timestamps, or to perform migrations without touching timestamps.
+> - The current time, according to SQLite, is not guaranteed to be constant in a given transaction. This may create undesired timestamp variations. We'll see below how GRDB provides a date that is constant at any point during a transaction.
+> - The current time, according to SQLite, can't be controlled in tests and previews.
+
+We'll start from this table and record type:
+
+```swift
+try db.create(table: "player") { t in
+    t.autoIncrementedPrimaryKey("id")
+    t.column("creationDate", .datetime).notNull()
+    t.column("modificationDate", .datetime).notNull()
+    t.column("name", .text).notNull()
+    t.column("score", .integer).notNull()
+}
+
+struct Player {
+    var id: Int64?
+    var creationDate: Date?
+    var modificationDate: Date?
+    var name: String
+    var score: Int
+}
+```
+
+See how the table has non-null dates, while the record has optional dates.
+
+This is because we intend, in this article, to timestamp actual database operations. The `creationDate` property is the date of database insertion, and `modificationDate` is the date of last modification in the database.
A new `Player` instance has no meaningful timestamp until it is saved, and this absence of information is represented with `nil`:
+
+```swift
+// A new player has no timestamps.
+var player = Player(id: nil, name: "Arthur", score: 1000)
+player.id               // nil, because never saved
+player.creationDate     // nil, because never saved
+player.modificationDate // nil, because never saved
+
+// After insertion, the player has timestamps.
+try dbQueue.write { db in
+    try player.insert(db)
+}
+player.id               // not nil
+player.creationDate     // not nil
+player.modificationDate // not nil
+```
+
+In the rest of the article, we'll address insertion first, then updates, and see a way to avoid those optional timestamps. The article ends with a sample protocol that your app may adapt and reuse.
+
+## Insertion Timestamp
+
+On insertion, the `Player` record should get fresh `creationDate` and `modificationDate`. The ``MutablePersistableRecord`` protocol provides the necessary tooling, with the ``MutablePersistableRecord/willInsert(_:)-1xfwo`` persistence callback. Before insertion, the record sets both its `creationDate` and `modificationDate`:
+
+```swift
+extension Player: Encodable, MutablePersistableRecord {
+    /// Sets both `creationDate` and `modificationDate` to the
+    /// transaction date, if they are not set yet.
+    mutating func willInsert(_ db: Database) throws {
+        if creationDate == nil {
+            creationDate = try db.transactionDate
+        }
+        if modificationDate == nil {
+            modificationDate = try db.transactionDate
+        }
+    }
+
+    /// Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+
+try dbQueue.write { db in
+    // An inserted record has both a creation and a modification date.
+    var player = Player(name: "Arthur", score: 1000)
+    try player.insert(db)
+    player.creationDate     // not nil
+    player.modificationDate // not nil
+}
+```
+
+The `willInsert` callback uses the ``Database/transactionDate`` instead of `Date()`. This has two advantages:
+
+- Within a write transaction, all inserted players get the same timestamp:
+
+    ```swift
+    // All players have the same timestamp.
+    try dbQueue.write { db in
+        for var player in players {
+            try player.insert(db)
+        }
+    }
+    ```
+
+- The transaction date can be configured with ``Configuration/transactionClock``, so that your tests and previews can control the date.
+
+## Modification Timestamp
+
+Let's now deal with updates. The `update` persistence method won't automatically bump the timestamp as the `insert` method does. We have to explicitly deal with the modification date:
+
+```swift
+// Increment the player score (two different ways).
+try dbQueue.write { db in
+    var player: Player = ...
+
+    // Update all columns
+    player.score += 1
+    player.modificationDate = try db.transactionDate
+    try player.update(db)
+
+    // Alternatively, update only the modified columns
+    try player.updateChanges(db) {
+        $0.score += 1
+        $0.modificationDate = try db.transactionDate
+    }
+}
+```
+
+Again, we use ``Database/transactionDate``, so that all modified players get the same timestamp within a given write transaction.
+
+> Note: The insertion case could profit from automatic initialization of the creation date with the ``MutablePersistableRecord/willInsert(_:)-1xfwo`` persistence callback, but the modification date is not handled with ``MutablePersistableRecord/willUpdate(_:columns:)-3oko4``. Instead, the above sample code explicitly modifies the modification date.
+>
+> This may look like an inconvenience, but there are several reasons for this:
+>
+> 1. The persistence methods that update are not mutating methods. `willUpdate` cannot modify the modification date.
+>
+> 2. Automatic changes to the modification date from the general `update` method create problems.
+>
+>    Developers are seduced by this convenient-looking feature, but they also eventually want to disable automatic timestamp updates in specific circumstances. That's because application requirements happen to change, and developers happen to overlook some corner cases.
+>
+>    This need is well acknowledged by existing database libraries: to disable automatic timestamp updates, [ActiveRecord](https://stackoverflow.com/questions/861448/is-there-a-way-to-avoid-automatically-updating-rails-timestamp-fields) uses globals (not thread-safe in a Swift application), [Django ORM](https://stackoverflow.com/questions/7499767/temporarily-disable-auto-now-auto-now-add) does not make it easy, and [Fluent](https://github.com/vapor/fluent-kit/issues/355) simply does not allow it.
+>
+>    None of those solutions, or lack thereof, is appealing.
+>
+> 3. Not all applications need one modification timestamp. For example, some need one timestamp per property, or per group of properties.
+>
+> By not providing automatic timestamp updates, all GRDB-powered applications are treated equally: they explicitly bump their modification timestamps when needed. Apps can help themselves by introducing protocols dedicated to their particular handling of updates. For an example of such a protocol, see below.

+## Dealing with Optional Timestamps
+
+When you fetch timestamped records from the database, it may be inconvenient to deal with optional dates, even though the database columns are guaranteed to be not null:
+
+```swift
+let player = try dbQueue.read { db in
+    try Player.find(db, key: 1)
+}
+player.creationDate     // optional 😕
+player.modificationDate // optional 😕
+```
+
+A possible technique is to define two record types: one that deals with players in general (optional timestamps), and one that only deals with persisted players (non-optional dates):
+
+```swift
+/// `Player` deals with unsaved players
+struct Player {
+    var id: Int64?              // optional
+    var creationDate: Date?     // optional
+    var modificationDate: Date? // optional
+    var name: String
+    var score: Int
+}
+
+extension Player: Encodable, MutablePersistableRecord {
+    /// Updates auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+
+    /// Sets both `creationDate` and `modificationDate` to the
+    /// transaction date, if they are not set yet.
+    mutating func willInsert(_ db: Database) throws {
+        if creationDate == nil {
+            creationDate = try db.transactionDate
+        }
+        if modificationDate == nil {
+            modificationDate = try db.transactionDate
+        }
+    }
+}
+
+/// `PersistedPlayer` deals with persisted players
+struct PersistedPlayer: Identifiable {
+    let id: Int64              // not optional
+    let creationDate: Date     // not optional
+    var modificationDate: Date // not optional
+    var name: String
+    var score: Int
+}
+
+extension PersistedPlayer: Codable, FetchableRecord, PersistableRecord {
+    static var databaseTableName: String { "player" }
+}
+```
+
+Usage:
+
+```swift
+// Fetch
+try dbQueue.read { db in
+    let persistedPlayer = try PersistedPlayer.find(db, id: 1)
+    persistedPlayer.creationDate     // not optional
+    persistedPlayer.modificationDate // not optional
+}
+
+// Insert
+try dbQueue.write { db in
+    var player = Player(id: nil, name: "Arthur", score: 1000)
+    player.id               // nil
+    player.creationDate     // nil
+    player.modificationDate // nil
+
+    let persistedPlayer = try player.insertAndFetch(db, as: PersistedPlayer.self)
+    persistedPlayer.id               // not optional
+    persistedPlayer.creationDate     // not optional
+    persistedPlayer.modificationDate // not optional
+}
+```
+
+See ``MutablePersistableRecord/insertAndFetch(_:onConflict:as:)`` and related methods for more information.
+
+## Sample code: TimestampedRecord
+
+This section provides a sample protocol for records that track their creation and modification dates.
+
+You can copy it in your application, or use it as an inspiration. Not all apps have the same needs regarding timestamps!
+
+`TimestampedRecord` provides the following features and methods:
+
+- Use it as a replacement for `MutablePersistableRecord` (even if your record does not use an auto-incremented primary key):
+
+    ```swift
+    // The base Player type
+    struct Player {
+        var id: Int64?
+        var creationDate: Date?
+        var modificationDate: Date?
+        var name: String
+        var score: Int
+    }
+
+    // Add database powers (read, write, timestamps)
+    extension Player: Codable, TimestampedRecord, FetchableRecord {
+        /// Update auto-incremented id upon successful insertion
+        mutating func didInsert(_ inserted: InsertionSuccess) {
+            id = inserted.rowID
+        }
+    }
+    ```
+
+- Timestamps are set on insertion:
+
+    ```swift
+    try dbQueue.write { db in
+        // An inserted record has both a creation and a modification date.
+        var player = Player(name: "Arthur", score: 1000)
+        try player.insert(db)
+        player.creationDate     // not nil
+        player.modificationDate // not nil
+    }
+    ```
+
+- `updateWithTimestamp()` behaves like ``MutablePersistableRecord/update(_:onConflict:)``, but it also bumps the modification date.
+
+    ```swift
+    // Bump the modification date and update all columns in the database.
+    player.score += 1
+    try player.updateWithTimestamp(db)
+    ```
+
+- `updateChangesWithTimestamp()` behaves like ``MutablePersistableRecord/updateChanges(_:onConflict:modify:)``, but it also bumps the modification date if the record is modified.
+
+    ```swift
+    // Only bump the modification date if record is changed, and only
+    // update the changed columns.
+    try player.updateChangesWithTimestamp(db) {
+        $0.score = 1000
+    }
+
+    // Prefer updateChanges() if the modification date should always be
+    // updated, even if other columns are not changed.
+    try player.updateChanges(db) {
+        $0.score = 1000
+        $0.modificationDate = try db.transactionDate
+    }
+    ```
+
+- `touch()` only updates the modification date in the database, just like the `touch` unix command.
+
+    ```swift
+    // Only update the modification date in the database.
+    try player.touch(db)
+    ```
+
+- There is no `TimestampedRecord.saveWithTimestamp()` method that would insert or update, like ``MutablePersistableRecord/save(_:onConflict:)``. You are encouraged to write the method below instead (and maybe extend your version of `TimestampedRecord` so that it supports this pattern):
+
+    ```swift
+    extension Player {
+        /// If the player has a non-nil primary key and a matching row in
+        /// the database, the player is updated. Otherwise, it is inserted.
+        mutating func saveWithTimestamp(_ db: Database) throws {
+            // Test the presence of id first, so that we don't perform an
+            // update that would surely throw RecordError.recordNotFound.
+            if id == nil {
+                try insert(db)
+            } else {
+                do {
+                    try updateWithTimestamp(db)
+                } catch RecordError.recordNotFound {
+                    // Primary key is set, but no row was updated.
+                    try insert(db)
+                }
+            }
+        }
+    }
+    ```
+
+The full implementation of `TimestampedRecord` follows:
+
+```swift
+/// A record type that tracks its creation and modification dates. See
+/// <doc:RecordTimestamps>.
+protocol TimestampedRecord: MutablePersistableRecord {
+    var creationDate: Date? { get set }
+    var modificationDate: Date? { get set }
+}
+
+extension TimestampedRecord {
+    /// By default, `TimestampedRecord` types set `creationDate` and
+    /// `modificationDate` to the transaction date, if they are nil,
+    /// before insertion.
+    ///
+    /// `TimestampedRecord` types that customize the `willInsert`
+    /// persistence callback should call `initializeTimestamps` from
+    /// their implementation.
+    mutating func willInsert(_ db: Database) throws {
+        try initializeTimestamps(db)
+    }
+
+    /// Sets `creationDate` and `modificationDate` to the transaction date,
+    /// if they are nil.
+    ///
+    /// It is called automatically before insertion, if your type does not
+    /// customize the `willInsert` persistence callback. If you customize
+    /// this callback, call `initializeTimestamps` from your implementation.
+    mutating func initializeTimestamps(_ db: Database) throws {
+        if creationDate == nil {
+            creationDate = try db.transactionDate
+        }
+        if modificationDate == nil {
+            modificationDate = try db.transactionDate
+        }
+    }
+
+    /// Sets `modificationDate`, and executes an `UPDATE` statement
+    /// on all columns.
+    ///
+    /// - parameter modificationDate: The modification date. If nil, the
+    ///   transaction date is used.
+    mutating func updateWithTimestamp(_ db: Database, modificationDate: Date? = nil) throws {
+        self.modificationDate = try modificationDate ?? db.transactionDate
+        try update(db)
+    }
+
+    /// Modifies the record according to the provided `modify` closure, and,
+    /// if and only if the record was modified, sets `modificationDate` and
+    /// executes an `UPDATE` statement that updates the modified columns.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.write { db in
+    ///     var player = try Player.find(db, id: 1)
+    ///     let modified = try player.updateChangesWithTimestamp(db) {
+    ///         $0.score = 1000
+    ///     }
+    ///     if modified {
+    ///         print("player was modified")
+    ///     } else {
+    ///         print("player was not modified")
+    ///     }
+    /// }
+    /// ```
+    ///
+    /// - parameters:
+    ///     - db: A database connection.
+    ///     - modificationDate: The modification date. If nil, the
+    ///       transaction date is used.
+    ///     - modify: A closure that modifies the record.
+    /// - returns: Whether the record was changed and updated.
+    @discardableResult
+    mutating func updateChangesWithTimestamp(
+        _ db: Database,
+        modificationDate: Date?
= nil, + modify: (inout Self) -> Void) + throws -> Bool + { + // Grab the changes performed by `modify` + let initialChanges = try databaseChanges(modify: modify) + if initialChanges.isEmpty { + return false + } + + // Update modification date and grab its column name + let dateChanges = try databaseChanges(modify: { + $0.modificationDate = try modificationDate ?? db.transactionDate + }) + + // Update the modified columns + let modifiedColumns = Set(initialChanges.keys).union(dateChanges.keys) + try update(db, columns: modifiedColumns) + return true + } + + /// Sets `modificationDate`, and executes an `UPDATE` statement that + /// updates the `modificationDate` column, if and only if the record + /// was modified. + /// + /// - parameter modificationDate: The modification date. If nil, the + /// transaction date is used. + mutating func touch(_ db: Database, modificationDate: Date? = nil) throws { + try updateChanges(db) { + $0.modificationDate = try modificationDate ?? db.transactionDate + } + } +} +``` diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo.png b/GRDB/Documentation.docc/Resources/GRDBLogo.png new file mode 100644 index 0000000000..309fe68b90 Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo.png differ diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo@2x.png b/GRDB/Documentation.docc/Resources/GRDBLogo@2x.png new file mode 100644 index 0000000000..b8df5c7ac1 Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo@2x.png differ diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo@3x.png b/GRDB/Documentation.docc/Resources/GRDBLogo@3x.png new file mode 100644 index 0000000000..d30fcf5bbc Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo@3x.png differ diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo~dark.png b/GRDB/Documentation.docc/Resources/GRDBLogo~dark.png new file mode 100644 index 0000000000..d3fe61a660 Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo~dark.png differ diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo~dark@2x.png b/GRDB/Documentation.docc/Resources/GRDBLogo~dark@2x.png new file mode 100644 index 0000000000..3ace0328d8 Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo~dark@2x.png differ diff --git a/GRDB/Documentation.docc/Resources/GRDBLogo~dark@3x.png b/GRDB/Documentation.docc/Resources/GRDBLogo~dark@3x.png new file mode 100644 index 0000000000..64f5d107bc Binary files /dev/null and b/GRDB/Documentation.docc/Resources/GRDBLogo~dark@3x.png differ diff --git a/GRDB/Documentation.docc/SQLSupport.md b/GRDB/Documentation.docc/SQLSupport.md index 7fbdb7cd00..36cda8d4da 100644 --- a/GRDB/Documentation.docc/SQLSupport.md +++ b/GRDB/Documentation.docc/SQLSupport.md @@ -62,6 +62,8 @@ try dbQueue.read { db in } ``` +For a more detailed overview, see [SQLite API](https://github.com/groue/GRDB.swift/blob/master/README.md#sqlite-api). + ## Topics ### Fundamental Database Types diff --git a/GRDB/Documentation.docc/SingleRowTables.md b/GRDB/Documentation.docc/SingleRowTables.md index cf90c69953..b3c842fcf9 100644 --- a/GRDB/Documentation.docc/SingleRowTables.md +++ b/GRDB/Documentation.docc/SingleRowTables.md @@ -8,9 +8,11 @@ Database tables that contain a single row can store configuration values, user p They are a suitable alternative to `UserDefaults` in some applications, especially when configuration refers to values found in other database tables, and database integrity is a concern. 
-An alternative way to store such configuration is a table of key-value pairs: two columns, and one row for each configuration value. This technique works, but it has a few drawbacks: you will have to deal with the various types of configuration values (strings, integers, dates, etc), and you won't be able to define foreign keys. This is why we won't explore key-value tables.
+A possible way to store such configuration is a table of key-value pairs: two columns, and one row for each configuration value. This technique works, but it has a few drawbacks: one has to deal with the various types of configuration values (strings, integers, dates, etc), and it is not possible to define foreign keys. This is why we won't explore key-value tables.
 
-This guide helps implementing a single-row table with GRDB, with recommendations on the database schema, migrations, and the design of a matching record type.
+In this guide, we'll implement a single-row table, with recommendations on the database schema, migrations, and the design of a Swift API for accessing the configuration values. The schema will define one column for each configuration value, because we aim to support foreign keys and references to other tables. You may prefer storing configuration values in a single JSON column. In this case, take inspiration from this guide, as well as <doc:JSON>.
+
+We will also aim to provide a default value for a given configuration, even when it is not stored on disk yet. This is a feature similar to [`UserDefaults.register(defaults:)`](https://developer.apple.com/documentation/foundation/userdefaults/1417065-register).
 
 ## The Single-Row Table
 
@@ -20,63 +22,43 @@ We want to instruct SQLite that our table must never contain more than one row.
 
 SQLite is not able to guarantee that the table is never empty, so we have to deal with two cases: either the table is empty, or it contains one row.
 
-Those two cases can create a nagging question for the application. By default, inserts fail when the row already exists, and updates fail when the table is empty. In order to avoid those errors, we will have the app deal with updates in the section below. Right now, we instruct SQLite to just replace the eventual existing row in case of conflicting inserts:
-
-```swift
-// CREATE TABLE appConfiguration (
-//   id INTEGER PRIMARY KEY ON CONFLICT REPLACE CHECK (id = 1),
-//   flag BOOLEAN NOT NULL,
-//   ...)
-try db.create(table: "appConfiguration") { t in
-    // Single row guarantee: have inserts replace the existing row
-    t.primaryKey("id", .integer, onConflict: .replace)
-        // Make sure the id column is always 1
-        .check { $0 == 1 }
-
-    // The configuration columns
-    t.column("flag", .boolean).notNull()
-    // ... other columns
-}
-```
-
-When you use <doc:Migrations>, you may wonder if it is a good idea or not to perform an initial insert just after the table is created. Well, this is not recommended:
+Those two cases can create a nagging question for the application. By default, inserts fail when the row already exists, and updates fail when the table is empty. In order to avoid those errors, we will have the app deal with updates in the section below. Right now, we instruct SQLite to just replace the eventual existing row in case of conflicting inserts.
 
 ```swift
-// NOT RECOMMENDED
 migrator.registerMigration("appConfiguration") { db in
+    // CREATE TABLE appConfiguration (
+    //   id INTEGER PRIMARY KEY ON CONFLICT REPLACE CHECK (id = 1),
+    //   storedFlag BOOLEAN,
+    //   ...)
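+    // (ON CONFLICT REPLACE has conflicting inserts overwrite the existing
+    // row instead of failing, and CHECK (id = 1) pins the row's id, so the
+    // table can never hold more than one row.)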
    try db.create(table: "appConfiguration") { t in
-        // The single row guarantee
-        t.primaryKey("id", .integer, onConflict: .replace).check { $0 == 1 }
+        // Single row guarantee: have inserts replace the existing row,
+        // and make sure the id column is always 1.
+        t.primaryKey("id", .integer, onConflict: .replace)
+            .check { $0 == 1 }
 
-        // Define sensible defaults for each column
-        t.column("flag", .boolean).notNull()
-            .defaults(to: false)
+        // The configuration columns
+        t.column("storedFlag", .boolean)
         // ... other columns
     }
-
-    // Populate the table
-    try db.execute(sql: "INSERT INTO appConfiguration DEFAULT VALUES")
 }
 ```
 
-It is not a good idea to populate the table in a migration, for two reasons:
+Note how the database table is defined in a migration. That's because most apps evolve, and need to add other configuration columns eventually. See <doc:Migrations> for more information.
 
-1. This migration is not a hard guarantee that the table will never be empty. As a consequence, this won't prevent the application code from dealing with the possibility of a missing row. On top of that, this application code may not use the same default values as the SQLite schema, with unclear consequences.
+We have defined a `storedFlag` column that can be NULL. That may be surprising, because optional booleans are usually a bad idea! But we can deal with this NULL at runtime, and nullable columns have a few advantages:
 
-2. Migrations that have been deployed on the users' devices should never change (see <doc:Migrations>). Inserting an initial row in a migration makes it difficult for the application to adjust the sensible default values in a future version.
+- NULL means that the application user has not made a choice yet. When `storedFlag` is NULL, the app can use a default value, such as `true`.
 
-The recommended migration creates the table, nothing more:
+- As the application evolves, it will need to add new configuration columns. It is not always possible to provide a sensible default value for these new columns at the moment the table is modified. On the other hand, it is generally possible to deal with those NULL values at runtime.
 
+Despite those arguments, some apps absolutely require a value. In this case, don't weaken the application logic, and make sure the database can't store a NULL value:
 
 ```swift
-// RECOMMENDED
+// Do not hesitate to require NOT NULL columns when the app requires them.
 migrator.registerMigration("appConfiguration") { db in
     try db.create(table: "appConfiguration") { t in
-        // The single row guarantee
         t.primaryKey("id", .integer, onConflict: .replace).check { $0 == 1 }
-
-        // The configuration columns
-        t.column("flag", .boolean).notNull()
-        // ... other columns
+        t.column("flag", .boolean).notNull() // required
     }
 }
 ```
 
@@ -91,7 +73,37 @@ struct AppConfiguration: Codable {
     // Support for the single row guarantee
     private var id = 1
 
-    // The configuration properties
+    // The stored properties
+    private var storedFlag: Bool?
+    // ... other properties
+}
+```
+
+The `storedFlag` property is private, because we want to expose a nice `flag` property that has a default value when `storedFlag` is nil:
+
+```swift
+// Support for default values
+extension AppConfiguration {
+    var flag: Bool {
+        get { storedFlag ?? true /* the default value */ }
+        set { storedFlag = newValue }
+    }
+
+    mutating func resetFlag() {
+        storedFlag = nil
+    }
+}
+```
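+
+For illustration, here is how the default value behaves at runtime (a minimal sketch; the `true` default is the one chosen in the getter above):
+
+```swift
+var config: AppConfiguration = ... // as read from the database
+config.flag           // true when storedFlag is nil: the default applies
+config.flag = false   // sets storedFlag to false
+config.resetFlag()    // storedFlag is nil again: the default applies
+```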
+
+This ceremony is not needed when the column cannot be null:
+
+```swift
+// The simplified setup for non-nullable columns
+struct AppConfiguration: Codable {
+    // Support for the single row guarantee
+    private var id = 1
+
+    // The stored properties
     var flag: Bool
     // ... other properties
 }
 ```
 
@@ -102,7 +114,7 @@ In case the database table is empty, we need a default configuration:
 
 ```swift
 extension AppConfiguration {
     /// The default configuration
-    static let `default` = AppConfiguration(flag: false)
+    static let `default` = AppConfiguration(storedFlag: nil)
 }
 ```
 
@@ -129,7 +141,7 @@ The standard GRDB method ``FetchableRecord/fetchOne(_:)`` returns an optional wh
 ```swift
     /// Returns the persisted configuration, or the default one if the
     /// database table is empty.
-    static func fetch(_ db: Database) throws -> AppConfiguration {
+    static func find(_ db: Database) throws -> AppConfiguration {
         try fetchOne(db) ?? .default
     }
 }
 
@@ -140,7 +152,7 @@ And that's it! Now we can use our singleton record:
 
 ```swift
 // READ
 let config = try dbQueue.read { db in
-    try AppConfiguration.fetch(db)
+    try AppConfiguration.find(db)
 }
 if config.flag {
     // ...
@@ -148,17 +160,19 @@ if config.flag {
 
 // WRITE
 try dbQueue.write { db in
-    // Saves a new config in the database
-    var config = try AppConfiguration.fetch(db)
+    // Update the config in the database
+    var config = try AppConfiguration.find(db)
     try config.updateChanges(db) {
         $0.flag = true
     }
 
     // Other possible ways to save the config:
-    try config.save(db)
-    try config.update(db)
-    try config.insert(db)
-    try config.upsert(db)
+    var config = try AppConfiguration.find(db)
+    config.flag = true
+    try config.save(db)   // all the same
+    try config.update(db) // all the same
+    try config.insert(db) // all the same
+    try config.upsert(db) // all the same
 }
 ```
 
@@ -172,11 +186,13 @@ We all love to copy and paste, don't we? Just customize the template code below:
 
 ```swift
 // Table creation
 try db.create(table: "appConfiguration") { t in
-    // The single row guarantee
-    t.primaryKey("id", .integer, onConflict: .replace).check { $0 == 1 }
+    // Single row guarantee: have inserts replace the existing row,
+    // and make sure the id column is always 1.
+    t.primaryKey("id", .integer, onConflict: .replace)
+        .check { $0 == 1 }
 
     // The configuration columns
-    t.column("flag", .boolean).notNull()
+    t.column("storedFlag", .boolean)
     // ... other columns
 }
 ```
 
@@ -192,14 +208,26 @@ struct AppConfiguration: Codable {
     // Support for the single row guarantee
     private var id = 1
 
-    // The configuration properties
-    var flag: Bool
+    // The stored properties
+    private var storedFlag: Bool?
     // ... other properties
 }
 
+// Support for default values
+extension AppConfiguration {
+    var flag: Bool {
+        get { storedFlag ?? true /* the default value */ }
+        set { storedFlag = newValue }
+    }
+
+    mutating func resetFlag() {
+        storedFlag = nil
+    }
+}
+
 extension AppConfiguration {
     /// The default configuration
-    static let `default` = AppConfiguration(flag: false, ...)
+    static let `default` = AppConfiguration(storedFlag: nil)
 }
 
 // Database Access
 
@@ -214,7 +242,7 @@ extension AppConfiguration: FetchableRecord, PersistableRecord {
     /// Returns the persisted configuration, or the default one if the
     /// database table is empty.
-    static func fetch(_ db: Database) throws -> AppConfiguration {
+    static func find(_ db: Database) throws -> AppConfiguration {
         try fetchOne(db) ?? .default
     }
 }
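+
+For apps that need to react to configuration changes, GRDB's `ValueObservation` can track the single row (a minimal sketch, using the `find` method defined above):
+
+```swift
+let cancellable = ValueObservation
+    .tracking { db in try AppConfiguration.find(db) }
+    .start(in: dbQueue) { error in
+        // Handle database errors
+    } onChange: { (config: AppConfiguration) in
+        print("flag is now", config.flag)
+    }
+```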
diff --git a/GRDB/Documentation.docc/Transactions.md b/GRDB/Documentation.docc/Transactions.md
index b12c3f32f0..56dedce47b 100644
--- a/GRDB/Documentation.docc/Transactions.md
+++ b/GRDB/Documentation.docc/Transactions.md
@@ -1,5 +1,7 @@
 # Transactions and Savepoints
 
+Precise transaction handling.
+
 ## Transactions and Safety
 
 **A transaction is a fundamental tool of SQLite** that guarantees [data consistency](https://www.sqlite.org/transactional.html) as well as [proper isolation](https://sqlite.org/isolation.html) between application threads and database connections. It is at the core of GRDB guarantees.
diff --git a/GRDB/Dump/Database+Dump.swift b/GRDB/Dump/Database+Dump.swift
new file mode 100644
index 0000000000..d4abfb653a
--- /dev/null
+++ b/GRDB/Dump/Database+Dump.swift
@@ -0,0 +1,357 @@
+import Foundation
+
+// MARK: - Dump
+
+extension Database {
+    /// Prints the results of all statements in the provided SQL.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.read { db in
+    ///     // Prints
+    ///     // 1|Arthur|500
+    ///     // 2|Barbara|1000
+    ///     try db.dumpSQL("SELECT * FROM player ORDER BY id")
+    /// }
+    /// ```
+    ///
+    /// - Parameters:
+    ///   - sql: The executed SQL.
+    ///   - format: The output format.
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpSQL(
+        _ sql: SQL,
+        format: some DumpFormat = .debug(),
+        to stream: (any TextOutputStream)? = nil)
+    throws
+    {
+        var dumpStream = DumpStream(stream)
+        try _dumpSQL(sql, format: format, to: &dumpStream)
+    }
+
+    /// Prints the results of a request.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.read { db in
+    ///     // Prints
+    ///     // 1|Arthur|500
+    ///     // 2|Barbara|1000
+    ///     try db.dumpRequest(Player.orderByPrimaryKey())
+    /// }
+    /// ```
+    ///
+    /// - Parameters:
+    ///   - request: The executed request.
+    ///   - format: The output format.
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpRequest(
+        _ request: some FetchRequest,
+        format: some DumpFormat = .debug(),
+        to stream: (any TextOutputStream)? = nil)
+    throws
+    {
+        var dumpStream = DumpStream(stream)
+        try _dumpRequest(request, format: format, to: &dumpStream)
+    }
+
+    /// Prints the contents of the provided tables and views.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.read { db in
+    ///     // player
+    ///     // 1|Arthur|500
+    ///     // 2|Barbara|1000
+    ///     //
+    ///     // team
+    ///     // 1|Red
+    ///     // 2|Blue
+    ///     try db.dumpTables(["player", "team"])
+    /// }
+    /// ```
+    ///
+    /// - Parameters:
+    ///   - tables: The table names.
+    ///   - format: The output format.
+    ///   - tableHeader: Options for printing table names.
+    ///   - stableOrder: A boolean value that controls the ordering of
+    ///     rows fetched from views. If false (the default), rows are
+    ///     printed in the order specified by the view (which may be
+    ///     undefined). If true, outputted rows are always printed in the
+    ///     same stable order. The purpose of this stable order is to make
+    ///     the output suitable for testing.
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpTables(
+        _ tables: [String],
+        format: some DumpFormat = .debug(),
+        tableHeader: DumpTableHeaderOptions = .automatic,
+        stableOrder: Bool = false,
+        to stream: (any TextOutputStream)? 
= nil) + throws + { + var dumpStream = DumpStream(stream) + try _dumpTables( + tables, + format: format, + tableHeader: tableHeader, + stableOrder: stableOrder, + to: &dumpStream) + } + + /// Prints the contents of the database. + /// + /// For example: + /// + /// ```swift + /// try dbQueue.read { db in + /// try db.dumpContent() + /// } + /// ``` + /// + /// This prints the database schema as well as the content of all + /// tables. For example: + /// + /// ``` + /// sqlite_master + /// CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT, score INTEGER) + /// + /// player + /// 1,'Arthur',500 + /// 2,'Barbara',1000 + /// ``` + /// + /// > Note: Internal SQLite and GRDB schema objects are not recorded + /// > (those with a name that starts with "sqlite_" or "grdb_"). + /// > + /// > [Shadow tables](https://www.sqlite.org/vtab.html#xshadowname) are + /// > not recorded, starting SQLite 3.37+. + /// + /// - Parameters: + /// - format: The output format. + /// - stream: A stream for text output, which directs output to the + /// console by default. + public func dumpContent( + format: some DumpFormat = .debug(), + to stream: (any TextOutputStream)? = nil) + throws + { + var dumpStream = DumpStream(stream) + try _dumpContent(format: format, to: &dumpStream) + } + + /// Prints the schema of the database. + /// + /// For example: + /// + /// ```swift + /// try dbQueue.read { db in + /// try db.dumpSchema() + /// } + /// ``` + /// + /// This prints the database schema. For example: + /// + /// ``` + /// sqlite_master + /// CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT, score INTEGER) + /// ``` + /// + /// > Note: Internal SQLite and GRDB schema objects are not recorded + /// > (those with a name that starts with "sqlite_" or "grdb_"). + /// > + /// > [Shadow tables](https://www.sqlite.org/vtab.html#xshadowname) are + /// > not recorded, starting SQLite 3.37+. + /// + /// - Parameters: + /// - stream: A stream for text output, which directs output to the + /// console by default. + public func dumpSchema( + to stream: (any TextOutputStream)? 
= nil)
+    throws
+    {
+        var dumpStream = DumpStream(stream)
+        try _dumpSchema(to: &dumpStream)
+    }
+}
+
+// MARK: -
+
+extension Database {
+    func _dumpStatements(
+        _ statements: some Cursor<Statement>,
+        format: some DumpFormat,
+        to stream: inout DumpStream)
+    throws
+    {
+        while let statement = try statements.next() {
+            var stepFormat = format
+            let cursor = try statement.makeCursor()
+            while try cursor.next() != nil {
+                try stepFormat.writeRow(self, statement: statement, to: &stream)
+            }
+            stepFormat.finalize(self, statement: statement, to: &stream)
+        }
+    }
+
+    func _dumpSQL(
+        _ sql: SQL,
+        format: some DumpFormat,
+        to stream: inout DumpStream)
+    throws
+    {
+        try _dumpStatements(allStatements(literal: sql), format: format, to: &stream)
+    }
+
+    func _dumpRequest(
+        _ request: some FetchRequest,
+        format: some DumpFormat,
+        to stream: inout DumpStream)
+    throws
+    {
+        let preparedRequest = try request.makePreparedRequest(self, forSingleResult: false)
+        try _dumpStatements(AnyCursor([preparedRequest.statement]), format: format, to: &stream)
+
+        if let supplementaryFetch = preparedRequest.supplementaryFetch {
+            let rows = try Row.fetchAll(self, request)
+            try withoutActuallyEscaping(
+                { request, keyPath in
+                    stream.write("\n")
+                    stream.writeln(keyPath.joined(separator: "."))
+                    try self._dumpRequest(request, format: format, to: &stream)
+                },
+                do: { willExecuteSupplementaryRequest in
+                    try supplementaryFetch(self, rows, willExecuteSupplementaryRequest)
+                })
+        }
+    }
+
+    func _dumpTables(
+        _ tables: [String],
+        format: some DumpFormat,
+        tableHeader: DumpTableHeaderOptions,
+        stableOrder: Bool,
+        to stream: inout DumpStream)
+    throws
+    {
+        let header: Bool
+        switch tableHeader {
+        case .always: header = true
+        case .automatic: header = tables.count > 1
+        }
+
+        var first = true
+        for table in tables {
+            if first {
+                first = false
+            } else {
+                stream.write("\n")
+            }
+
+            if header {
+                stream.writeln(table)
+            }
+
+            if try tableExists(table) {
+                // Always sort tables by primary key
+                try _dumpRequest(Table(table).orderByPrimaryKey(), format: format, to: &stream)
+            } else if stableOrder {
+                // View with stable order
+                try _dumpRequest(Table(table).all().withStableOrder(), format: format, to: &stream)
+            } else {
+                // Use view ordering, if any (no guarantee of stable order). 
+ try _dumpRequest(Table(table).all(), format: format, to: &stream) + } + } + } + + func _dumpContent( + format: some DumpFormat, + to stream: inout DumpStream) + throws + { + try _dumpSchema(to: &stream) + stream.margin() + + let tables = try String + .fetchAll(self, sql: """ + SELECT name + FROM sqlite_master + WHERE type = 'table' + ORDER BY name COLLATE NOCASE + """) + .filter { + try !ignoresObject(named: $0) + } + try _dumpTables(tables, format: format, tableHeader: .always, stableOrder: true, to: &stream) + } + + func _dumpSchema( + to stream: inout DumpStream) + throws + { + stream.writeln("sqlite_master") + let sqlRows = try Row.fetchAll(self, sql: """ + SELECT sql || ';', name + FROM sqlite_master + WHERE sql IS NOT NULL + ORDER BY + tbl_name COLLATE NOCASE, + CASE type WHEN 'table' THEN 'a' WHEN 'index' THEN 'aa' ELSE type END, + name COLLATE NOCASE, + sql + """) + for row in sqlRows { + let name: String = row[1] + if try ignoresObject(named: name) { + continue + } + stream.writeln(row[0]) + } + } + + private func ignoresObject(named name: String) throws -> Bool { + if Database.isSQLiteInternalTable(name) { return true } + if Database.isGRDBInternalTable(name) { return true } + if try isShadowTable(name) { return true } + return false + } + + private func isShadowTable(_ tableName: String) throws -> Bool { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Maybe SQLCipher is too old: check actual version + if sqlite3_libversion_number() >= 3037000 { + guard let table = try table(tableName) else { + // Not a table + return false + } + return table.kind == .shadow + } +#else + if #available(iOS 15.4, macOS 12.4, tvOS 15.4, watchOS 8.5, *) { // SQLite 3.37+ + guard let table = try table(tableName) else { + // Not a table + return false + } + return table.kind == .shadow + } +#endif + // Don't know + return false + } +} + +/// Options for printing table names. +public enum DumpTableHeaderOptions: Sendable { + /// Table names are only printed when several tables are printed. + case automatic + + /// Table names are always printed. + case always +} diff --git a/GRDB/Dump/DatabaseReader+dump.swift b/GRDB/Dump/DatabaseReader+dump.swift new file mode 100644 index 0000000000..55e829c89d --- /dev/null +++ b/GRDB/Dump/DatabaseReader+dump.swift @@ -0,0 +1,173 @@ +extension DatabaseReader { + /// Prints the results of all statements in the provided SQL. + /// + /// For example: + /// + /// ```swift + /// // Prints + /// // 1|Arthur|500 + /// // 2|Barbara|1000 + /// try dbQueue.dumpSQL("SELECT * FROM player ORDER BY id") + /// ``` + /// + /// - Parameters: + /// - sql: The executed SQL. + /// - format: The output format. + /// - stream: A stream for text output, which directs output to the + /// console by default. + public func dumpSQL( + _ sql: SQL, + format: some DumpFormat = .debug(), + to stream: (any TextOutputStream)? = nil) + throws + { + try unsafeReentrantRead { db in + try db.dumpSQL(sql, format: format, to: stream) + } + } + + /// Prints the results of a request. + /// + /// For example: + /// + /// ```swift + /// // Prints + /// // 1|Arthur|500 + /// // 2|Barbara|1000 + /// try dbQueue.dumpRequest(Player.orderByPrimaryKey()) + /// ``` + /// + /// - Parameters: + /// - request : The executed request. + /// - format: The output format. + /// - stream: A stream for text output, which directs output to the + /// console by default. + public func dumpRequest( + _ request: some FetchRequest, + format: some DumpFormat = .debug(), + to stream: (any TextOutputStream)? 
= nil)
+    throws
+    {
+        try unsafeReentrantRead { db in
+            try db.dumpRequest(request, format: format, to: stream)
+        }
+    }
+
+    /// Prints the contents of the provided tables and views.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // player
+    /// // 1|Arthur|500
+    /// // 2|Barbara|1000
+    /// //
+    /// // team
+    /// // 1|Red
+    /// // 2|Blue
+    /// try dbQueue.dumpTables(["player", "team"])
+    /// ```
+    ///
+    /// - Parameters:
+    ///   - tables: The table names.
+    ///   - format: The output format.
+    ///   - tableHeader: Options for printing table names.
+    ///   - stableOrder: A boolean value that controls the ordering of
+    ///     rows fetched from views. If false (the default), rows are
+    ///     printed in the order specified by the view (which may be
+    ///     undefined). If true, outputted rows are always printed in the
+    ///     same stable order. The purpose of this stable order is to make
+    ///     the output suitable for testing.
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpTables(
+        _ tables: [String],
+        format: some DumpFormat = .debug(),
+        tableHeader: DumpTableHeaderOptions = .automatic,
+        stableOrder: Bool = false,
+        to stream: (any TextOutputStream)? = nil)
+    throws
+    {
+        try unsafeReentrantRead { db in
+            try db.dumpTables(
+                tables,
+                format: format,
+                tableHeader: tableHeader,
+                stableOrder: stableOrder,
+                to: stream)
+        }
+    }
+
+    /// Prints the contents of the database.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.dumpContent()
+    /// ```
+    ///
+    /// This prints the database schema as well as the content of all
+    /// tables. For example:
+    ///
+    /// ```
+    /// sqlite_master
+    /// CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT, score INTEGER)
+    ///
+    /// player
+    /// 1,'Arthur',500
+    /// 2,'Barbara',1000
+    /// ```
+    ///
+    /// > Note: Internal SQLite and GRDB schema objects are not recorded
+    /// > (those with a name that starts with "sqlite_" or "grdb_").
+    /// >
+    /// > [Shadow tables](https://www.sqlite.org/vtab.html#xshadowname) are
+    /// > not recorded, starting SQLite 3.37+.
+    ///
+    /// - Parameters:
+    ///   - format: The output format.
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpContent(
+        format: some DumpFormat = .debug(),
+        to stream: (any TextOutputStream)? = nil)
+    throws
+    {
+        try unsafeReentrantRead { db in
+            try db.dumpContent(format: format, to: stream)
+        }
+    }
+
+    /// Prints the schema of the database.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// try dbQueue.dumpSchema()
+    /// ```
+    ///
+    /// This prints the database schema. For example:
+    ///
+    /// ```
+    /// sqlite_master
+    /// CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT, score INTEGER)
+    /// ```
+    ///
+    /// > Note: Internal SQLite and GRDB schema objects are not recorded
+    /// > (those with a name that starts with "sqlite_" or "grdb_").
+    /// >
+    /// > [Shadow tables](https://www.sqlite.org/vtab.html#xshadowname) are
+    /// > not recorded, starting SQLite 3.37+.
+    ///
+    /// - Parameters:
+    ///   - stream: A stream for text output, which directs output to the
+    ///     console by default.
+    public func dumpSchema(
+        to stream: (any TextOutputStream)? = nil)
+    throws
+    {
+        try unsafeReentrantRead { db in
+            try db.dumpSchema(to: stream)
+        }
+    }
+}
diff --git a/GRDB/Dump/DumpFormat.swift b/GRDB/Dump/DumpFormat.swift
new file mode 100644
index 0000000000..8157fc0cab
--- /dev/null
+++ b/GRDB/Dump/DumpFormat.swift
@@ -0,0 +1,112 @@
+/// A type that prints database rows. 
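+///
+/// For a first taste, here is a sketch of a custom conforming type that
+/// only counts rows (for illustration only; it is not one of the built-in
+/// formats documented below):
+///
+/// ```swift
+/// struct RowCountFormat: DumpFormat {
+///     var count = 0
+///
+///     mutating func writeRow(_ db: Database, statement: Statement, to stream: inout DumpStream) throws {
+///         count += 1
+///     }
+///
+///     mutating func finalize(_ db: Database, statement: Statement, to stream: inout DumpStream) {
+///         stream.write("\(count) row(s)\n")
+///         count = 0
+///     }
+/// }
+/// ```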
+/// +/// Types that conform to `DumpFormat` feed the printing methods such as +/// ``DatabaseReader/dumpContent(format:to:)`` and +/// ``Database/dumpSQL(_:format:to:)``. +/// +/// Most built-in formats are inspired from the +/// [output formats of the SQLite command line tool](https://sqlite.org/cli.html#changing_output_formats). +/// +/// ## Topics +/// +/// ### Built-in Formats +/// +/// - ``debug(header:separator:nullValue:)`` +/// - ``json(encoder:)`` +/// - ``line(nullValue:)`` +/// - ``list(header:separator:nullValue:)`` +/// - ``quote(header:separator:)`` +/// +/// ### Supporting Types +/// +/// - ``DebugDumpFormat`` +/// - ``JSONDumpFormat`` +/// - ``LineDumpFormat`` +/// - ``ListDumpFormat`` +/// - ``QuoteDumpFormat`` +/// - ``DumpStream`` +/// +/// ### Implementing a custom format +/// +/// [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) +/// +/// - ``writeRow(_:statement:to:)`` +/// - ``finalize(_:statement:to:)`` +public protocol DumpFormat { + /// Writes a row from the given statement. + /// + /// - Parameters: + /// - db: A connection to the database + /// - statement: The iterated statement + /// - stream: A stream for text output. + mutating func writeRow( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) throws + + /// All rows from the statement have been printed. + /// + /// - Parameters: + /// - db: A connection to the database + /// - statement: The statement that was iterated. + /// - stream: A stream for text output. + mutating func finalize( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) +} + +/// A TextOutputStream that prints to standard output +struct StandardOutputStream: TextOutputStream { + func write(_ string: String) { + print(string, terminator: "") + } +} + +/// A text output stream suited for printing database content. +/// +/// [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) +public struct DumpStream { + var base: any TextOutputStream + var needsMarginLine = false + + init(_ base: (any TextOutputStream)?) { + self.base = base ?? StandardOutputStream() + } + + /// Will write `"\n"` before the next non-empty string. + public mutating func margin() { + needsMarginLine = true + } +} + +extension DumpStream: TextOutputStream { + public mutating func write(_ string: String) { + if needsMarginLine && !string.isEmpty { + needsMarginLine = false + if string.first != "\n" { + base.write("\n") + } + } + base.write(string) + } +} + +extension TextOutputStream { + mutating func writeln(_ string: String) { + write(string) + write("\n") + } +} + +extension String { + func leftPadding(toLength newLength: Int, withPad padString: String) -> String { + precondition(padString.count == 1) + if count < newLength { + return String(repeating: padString, count: newLength - count) + self + } else { + let startIndex = index(startIndex, offsetBy: count - newLength) + return String(self[startIndex...]) + } + } +} diff --git a/GRDB/Dump/DumpFormats/DebugDumpFormat.swift b/GRDB/Dump/DumpFormats/DebugDumpFormat.swift new file mode 100644 index 0000000000..02f2f11304 --- /dev/null +++ b/GRDB/Dump/DumpFormats/DebugDumpFormat.swift @@ -0,0 +1,153 @@ +import Foundation + +/// A format that prints one line per database row, suitable +/// for debugging. +/// +/// This format may change in future releases. It is not suitable for +/// processing by other programs, or testing. 
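+///
+/// With `header: true`, the column names are printed on the first line
+/// (a sketch based on the `header` option described below):
+///
+/// ```swift
+/// // name|score
+/// // Arthur|500
+/// // Barbara|1000
+/// // Craig|200
+/// try db.dumpRequest(Player.all(), format: .debug(header: true))
+/// ```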
+///
+/// On each line, database values are separated by a separator (`|`
+/// by default).
+///
+/// For example:
+///
+/// ```swift
+/// // Arthur|500
+/// // Barbara|1000
+/// // Craig|200
+/// try db.dumpRequest(Player.all(), format: .debug())
+/// ```
+public struct DebugDumpFormat: Sendable {
+    /// A boolean value indicating if column labels are printed as the first
+    /// line of output.
+    public var header: Bool
+
+    /// The separator between values.
+    public var separator: String
+
+    /// The string to print for NULL values.
+    public var nullValue: String
+
+    private var firstRow = true
+
+    /// Creates a `DebugDumpFormat`.
+    ///
+    /// - Parameters:
+    ///   - header: A boolean value indicating if column labels are printed
+    ///     as the first line of output.
+    ///   - separator: The separator between values.
+    ///   - nullValue: The string to print for NULL values.
+    public init(
+        header: Bool = false,
+        separator: String = "|",
+        nullValue: String = "")
+    {
+        self.header = header
+        self.separator = separator
+        self.nullValue = nullValue
+    }
+}
+
+extension DebugDumpFormat: DumpFormat {
+    public mutating func writeRow(
+        _ db: Database,
+        statement: Statement,
+        to stream: inout DumpStream)
+    {
+        if firstRow {
+            firstRow = false
+            if header {
+                stream.writeln(statement.columnNames.joined(separator: separator))
+            }
+        }
+
+        let sqliteStatement = statement.sqliteStatement
+        var first = true
+        for index in 0..<sqlite3_column_count(sqliteStatement) {
+            if first {
+                first = false
+            } else {
+                stream.write(separator)
+            }
+            stream.write(formattedValue(db, sqliteStatement: sqliteStatement, index: index))
+        }
+        stream.write("\n")
+    }
+
+    public mutating func finalize(
+        _ db: Database,
+        statement: Statement,
+        to stream: inout DumpStream)
+    {
+        firstRow = true
+    }
+
+    private func formattedValue(
+        _ db: Database,
+        sqliteStatement: SQLiteStatement,
+        index: CInt)
+    -> String {
+        switch sqlite3_column_type(sqliteStatement, index) {
+        case SQLITE_NULL:
+            return nullValue
+
+        case SQLITE_INTEGER:
+            return Int64(sqliteStatement: sqliteStatement, index: index).description
+
+        case SQLITE_FLOAT:
+            return Double(sqliteStatement: sqliteStatement, index: index).description
+
+        case SQLITE_BLOB:
+            let data = Data(sqliteStatement: sqliteStatement, index: index)
+            if let string = String(data: data, encoding: .utf8) {
+                return string
+            } else if data.count == 16, let blob = sqlite3_column_blob(sqliteStatement, index) {
+                let uuid = UUID(uuid: blob.assumingMemoryBound(to: uuid_t.self).pointee)
+                return uuid.uuidString
+            } else {
+                return try! data.sqlExpression.quotedSQL(db)
+            }
+
+        case SQLITE_TEXT:
+            return String(sqliteStatement: sqliteStatement, index: index)
+
+        default:
+            return ""
+        }
+    }
+}
+
+extension DumpFormat where Self == DebugDumpFormat {
+    /// A format that prints one line per database row, suitable
+    /// for debugging.
+    ///
+    /// This format may change in future releases. It is not suitable for
+    /// processing by other programs, or testing.
+    ///
+    /// On each line, database values are separated by a separator (`|`
+    /// by default).
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // Arthur|500
+    /// // Barbara|1000
+    /// // Craig|200
+    /// try db.dumpRequest(Player.all(), format: .debug())
+    /// ```
+    ///
+    /// - Parameters:
+    ///   - header: A boolean value indicating if column labels are printed
+    ///     as the first line of output.
+    ///   - separator: The separator between values.
+    ///   - nullValue: The string to print for NULL values.
+    public static func debug(
+        header: Bool = false,
+        separator: String = "|",
+        nullValue: String = "")
+    -> Self
+    {
+        DebugDumpFormat(header: header, separator: separator, nullValue: nullValue)
+    }
+}
diff --git a/GRDB/Dump/DumpFormats/JSONDumpFormat.swift b/GRDB/Dump/DumpFormats/JSONDumpFormat.swift
new file mode 100644
index 0000000000..7e3bc61d5b
--- /dev/null
+++ b/GRDB/Dump/DumpFormats/JSONDumpFormat.swift
@@ -0,0 +1,204 @@
+import Foundation
+
+/// A format that prints database rows as a JSON array. 
+/// +/// For example: +/// +/// ```swift +/// // [{"name":"Arthur","score":500}, +/// // {"name":"Barbara","score":1000}] +/// try db.dumpRequest(Player.all(), format: .json()) +/// ``` +/// +/// For a pretty-printed output, customize the JSON encoder: +/// +/// ```swift +/// // [ +/// // { +/// // "name": "Arthur", +/// // "score": 500 +/// // }, +/// // { +/// // "name": "Barbara", +/// // "score": 1000 +/// // } +/// // ] +/// let encoder = JSONDumpFormat.defaultEncoder +/// encoder.outputFormatting = .prettyPrinted +/// try db.dumpRequest(Player.all(), format: .json(encoder)) +/// ``` +public struct JSONDumpFormat: Sendable { + /// The default `JSONEncoder` for database values. + /// + /// It is configured so that blob values (`Data`) are encoded in the + /// base64 format, and Non-conforming floats are encoded as "inf", + /// "-inf" and "nan". + /// + /// It uses the output formatting option + /// `JSONEncoder.OutputFormatting.withoutEscapingSlashes` when available. + /// + /// Modifying the returned encoder does not affect any encoder returned + /// by future calls to this method. It is always safe to use the + /// returned encoder as a starting point for additional customization. + public static var defaultEncoder: JSONEncoder { + // This encoder MUST NOT CHANGE, because some people rely on this format. + let encoder = JSONEncoder() + if #available(iOS 13.0, macOS 10.15, tvOS 13.0, watchOS 6.0, *) { + encoder.outputFormatting = .withoutEscapingSlashes + } + encoder.nonConformingFloatEncodingStrategy = .convertToString( + positiveInfinity: "inf", + negativeInfinity: "-inf", + nan: "nan") + encoder.dataEncodingStrategy = .base64 + return encoder + } + + /// The JSONEncoder that formats individual database values. + public var encoder: JSONEncoder + + var firstRow = true + + /// Creates a `JSONDumpFormat`. + /// + /// - Parameter encoder: The JSONEncoder that formats individual + /// database values. If the outputFormatting` options contain + /// `.prettyPrinted`, the printed array has one value per line. + public init(encoder: JSONEncoder = JSONDumpFormat.defaultEncoder) { + self.encoder = encoder + } +} + +extension JSONDumpFormat: DumpFormat { + public mutating func writeRow( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) + throws { + if firstRow { + firstRow = false + stream.write("[") + if encoder.outputFormatting.contains(.prettyPrinted) { + stream.write("\n") + } + } else { + stream.write(",\n") + } + + if encoder.outputFormatting.contains(.prettyPrinted) { + stream.write(" ") + } + stream.write("{") + let sqliteStatement = statement.sqliteStatement + var first = true + for index in 0.. 
String { + switch sqlite3_column_type(sqliteStatement, index) { + case SQLITE_NULL: + return "null" + + case SQLITE_INTEGER: + return try formattedValue(Int64(sqliteStatement: sqliteStatement, index: index)) + + case SQLITE_FLOAT: + return try formattedValue(Double(sqliteStatement: sqliteStatement, index: index)) + + case SQLITE_BLOB: + return try formattedValue(Data(sqliteStatement: sqliteStatement, index: index)) + + case SQLITE_TEXT: + return try formattedValue(String(sqliteStatement: sqliteStatement, index: index)) + + default: + return "" + } + } + + private func formattedValue(_ value: some Encodable) throws -> String { + let data = try encoder.encode(value) + guard let string = String(data: data, encoding: .utf8) else { + throw EncodingError.invalidValue(data, .init(codingPath: [], debugDescription: "Invalid JSON data")) + } + return string + } +} + +extension DumpFormat where Self == JSONDumpFormat { + /// A format that prints database rows as a JSON array. + /// + /// For example: + /// + /// ```swift + /// // [{"name":"Arthur","score":500}, + /// // {"name":"Barbara","score":1000}] + /// try db.dumpRequest(Player.all(), format: .json()) + /// ``` + /// + /// For a pretty-printed output, customize the JSON encoder: + /// + /// ```swift + /// // [ + /// // { + /// // "name": "Arthur", + /// // "score": 500 + /// // }, + /// // { + /// // "name": "Barbara", + /// // "score": 1000 + /// // } + /// // ] + /// let encoder = JSONDumpFormat.defaultEncoder + /// encoder.outputFormatting = .prettyPrinted + /// try db.dumpRequest(Player.all(), format: .json(encoder)) + /// ``` + /// + /// - Parameter encoder: The JSONEncoder that formats individual + /// database values. If the outputFormatting` options contain + /// `.prettyPrinted`, the printed array has one value per line. + public static func json(encoder: JSONEncoder = JSONDumpFormat.defaultEncoder) -> Self { + JSONDumpFormat(encoder: encoder) + } +} diff --git a/GRDB/Dump/DumpFormats/LineDumpFormat.swift b/GRDB/Dump/DumpFormats/LineDumpFormat.swift new file mode 100644 index 0000000000..a0c9f9a1fd --- /dev/null +++ b/GRDB/Dump/DumpFormats/LineDumpFormat.swift @@ -0,0 +1,129 @@ +import Foundation + +/// A format that prints one line per database value. All blob values +/// are interpreted as strings. +/// +/// For example: +/// +/// ```swift +/// // name = Arthur +/// // score = 500 +/// // +/// // name = Barbara +/// // score = 1000 +/// try db.dumpRequest(Player.all(), format: .line()) +/// ``` +public struct LineDumpFormat: Sendable { + /// The string to print for NULL values. + public var nullValue: String + + var firstRow = true + + /// Creates a `LineDumpFormat`. + /// + /// - Parameters: + /// - nullValue: The string to print for NULL values. + public init( + nullValue: String = "") + { + self.nullValue = nullValue + } +} + +extension LineDumpFormat: DumpFormat { + public mutating func writeRow( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) + { + var lines: [(column: String, value: String)] = [] + let sqliteStatement = statement.sqliteStatement + for index in 0.. Self + { + LineDumpFormat(nullValue: nullValue) + } +} diff --git a/GRDB/Dump/DumpFormats/ListDumpFormat.swift b/GRDB/Dump/DumpFormats/ListDumpFormat.swift new file mode 100644 index 0000000000..c256af4c82 --- /dev/null +++ b/GRDB/Dump/DumpFormats/ListDumpFormat.swift @@ -0,0 +1,141 @@ +import Foundation + +/// A format that prints one line per database row. All blob values +/// are interpreted as strings. 
+/// +/// On each line, database values are separated by a separator (`|` +/// by default). Blob values are interpreted as UTF8 strings. +/// +/// For example: +/// +/// ```swift +/// // Arthur|500 +/// // Barbara|1000 +/// // Craig|200 +/// try db.dumpRequest(Player.all(), format: .list()) +/// ``` +public struct ListDumpFormat: Sendable { + /// A boolean value indicating if column labels are printed as the first + /// line of output. + public var header: Bool + + /// The separator between values. + public var separator: String + + /// The string to print for NULL values. + public var nullValue: String + + private var firstRow = true + + /// Creates a `ListDumpFormat`. + /// + /// - Parameters: + /// - header: A boolean value indicating if column labels are printed + /// as the first line of output. + /// - separator: The separator between values. + /// - nullValue: The string to print for NULL values. + public init( + header: Bool = false, + separator: String = "|", + nullValue: String = "") + { + self.header = header + self.separator = separator + self.nullValue = nullValue + } +} + +extension ListDumpFormat: DumpFormat { + public mutating func writeRow( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) + { + if firstRow { + firstRow = false + if header { + stream.writeln(statement.columnNames.joined(separator: separator)) + } + } + + let sqliteStatement = statement.sqliteStatement + var first = true + for index in 0.. String + { + switch sqlite3_column_type(sqliteStatement, index) { + case SQLITE_NULL: + return nullValue + + case SQLITE_INTEGER: + return Int64(sqliteStatement: sqliteStatement, index: index).description + + case SQLITE_FLOAT: + return Double(sqliteStatement: sqliteStatement, index: index).description + + case SQLITE_BLOB, SQLITE_TEXT: + return String(sqliteStatement: sqliteStatement, index: index) + + default: + return "" + } + } +} + +extension DumpFormat where Self == ListDumpFormat { + /// A format that prints one line per database row. All blob values + /// are interpreted as strings. + /// + /// On each line, database values are separated by a separator (`|` + /// by default). Blob values are interpreted as UTF8 strings. + /// + /// For example: + /// + /// ```swift + /// // Arthur|500 + /// // Barbara|1000 + /// // Craig|200 + /// try db.dumpRequest(Player.all(), format: .list()) + /// ``` + /// + /// - Parameters: + /// - header: A boolean value indicating if column labels are printed + /// as the first line of output. + /// - separator: The separator between values. + /// - nullValue: The string to print for NULL values. + public static func list( + header: Bool = false, + separator: String = "|", + nullValue: String = "") + -> Self + { + ListDumpFormat(header: header, separator: separator, nullValue: nullValue) + } +} diff --git a/GRDB/Dump/DumpFormats/QuoteDumpFormat.swift b/GRDB/Dump/DumpFormats/QuoteDumpFormat.swift new file mode 100644 index 0000000000..3c6ae5c17a --- /dev/null +++ b/GRDB/Dump/DumpFormats/QuoteDumpFormat.swift @@ -0,0 +1,101 @@ +/// A format that prints one line per database row, formatting values +/// as SQL literals. +/// +/// For example: +/// +/// ```swift +/// // 'Arthur',500 +/// // 'Barbara',1000 +/// // 'Craig',200 +/// try db.dumpRequest(Player.all(), format: .quote()) +/// ``` +public struct QuoteDumpFormat: Sendable { + /// A boolean value indicating if column labels are printed as the first + /// line of output. + public var header: Bool + + /// The separator between values. 
+ public var separator: String + + var firstRow = true + + /// Creates a `QuoteDumpFormat`. + /// + /// - Parameters: + /// - header: A boolean value indicating if column labels are printed + /// as the first line of output. + /// - separator: The separator between values. + public init( + header: Bool = false, + separator: String = ",") + { + self.header = header + self.separator = separator + } +} + +extension QuoteDumpFormat: DumpFormat { + public mutating func writeRow( + _ db: Database, + statement: Statement, + to stream: inout DumpStream) + { + if firstRow { + firstRow = false + if header { + stream.writeln(statement.columnNames + .map { try! $0.sqlExpression.quotedSQL(db) } + .joined(separator: separator)) + } + } + + let sqliteStatement = statement.sqliteStatement + var first = true + for index in 0.. Self { + QuoteDumpFormat(header: header, separator: separator) + } +} diff --git a/GRDB/Export.swift b/GRDB/Export.swift index bf3e6d0d1a..f5aaed5728 100644 --- a/GRDB/Export.swift +++ b/GRDB/Export.swift @@ -1 +1,8 @@ +// Export the underlying SQLite library +#if SWIFT_PACKAGE +@_exported import CSQLite +#elseif GRDBCIPHER @_exported import SQLCipher +#elseif !GRDBCUSTOMSQLITE && !GRDBCIPHER +@_exported import SQLite3 +#endif diff --git a/GRDB/FTS/FTS3.swift b/GRDB/FTS/FTS3.swift index d93549816c..80e677ae18 100644 --- a/GRDB/FTS/FTS3.swift +++ b/GRDB/FTS/FTS3.swift @@ -1,7 +1,14 @@ /// The virtual table module for the FTS3 full-text engine. /// /// To create FTS3 tables, use the ``Database`` method -/// ``Database/create(virtualTable:ifNotExists:using:_:)``. +/// ``Database/create(virtualTable:ifNotExists:using:_:)``: +/// +/// ```swift +/// // CREATE VIRTUAL TABLE document USING fts3(content) +/// try db.create(virtualTable: "document", using: FTS3()) { t in +/// t.column("content") +/// } +/// ``` /// /// Related SQLite documentation: /// @@ -22,27 +29,25 @@ /// - ``tokenize(_:withTokenizer:)`` public struct FTS3 { /// Options for Latin script characters. - public enum Diacritics { - /// Do not remove diacritics from Latin script characters. + public enum Diacritics: Sendable { + /// Do not remove diacritics from Latin script characters. This option + /// matches the `remove_diacritics=0` tokenizer argument. /// - /// This option matches the `remove_diacritics=0` tokenizer argument. + /// Related SQLite documentation: case keep - /// Remove diacritics from Latin script characters. - /// - /// This option matches the `remove_diacritics=1` tokenizer argument. + /// Remove diacritics from Latin script characters. This option matches + /// the `remove_diacritics=1` tokenizer argument. case removeLegacy #if GRDBCUSTOMSQLITE - /// Remove diacritics from Latin script characters. - /// - /// This option matches the `remove_diacritics=2` tokenizer argument. + /// Remove diacritics from Latin script characters. This option matches + /// the `remove_diacritics=2` tokenizer argument. case remove #elseif !GRDBCIPHER - /// Remove diacritics from Latin script characters. - /// - /// This option matches the `remove_diacritics=2` tokenizer argument. - @available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) // SQLite 3.27+ + /// Remove diacritics from Latin script characters. This option matches + /// the `remove_diacritics=2` tokenizer argument. 
+ @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.27+ case remove #endif } @@ -178,3 +183,8 @@ public final class FTS3TableDefinition { columns.append(name) } } + +// Explicit non-conformance to Sendable: `FTS3TableDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FTS3TableDefinition: Sendable { } diff --git a/GRDB/FTS/FTS3Pattern.swift b/GRDB/FTS/FTS3Pattern.swift index 9862566f2e..af6e82170d 100644 --- a/GRDB/FTS/FTS3Pattern.swift +++ b/GRDB/FTS/FTS3Pattern.swift @@ -16,7 +16,7 @@ /// - ``init(matchingAllTokensIn:)`` /// - ``init(matchingAnyTokenIn:)`` /// - ``init(matchingPhrase:)`` -public struct FTS3Pattern { +public struct FTS3Pattern: Sendable { /// The raw pattern string. /// /// It is guaranteed to be a valid FTS3/4 pattern. diff --git a/GRDB/FTS/FTS3TokenizerDescriptor.swift b/GRDB/FTS/FTS3TokenizerDescriptor.swift index 375ec7f532..14b2a3e81e 100644 --- a/GRDB/FTS/FTS3TokenizerDescriptor.swift +++ b/GRDB/FTS/FTS3TokenizerDescriptor.swift @@ -1,11 +1,11 @@ -/// An ``FTS3`` tokenizer. +/// The descriptor for an ``FTS3`` tokenizer. /// /// `FTS3TokenizerDescriptor` can be used in both ``FTS3`` and ``FTS4`` tables. /// /// For example: /// /// ```swift -/// db.create(virtualTable: "book", using: FTS4()) { t in +/// try db.create(virtualTable: "book", using: FTS4()) { t in /// t.tokenizer = .simple // FTS3TokenizerDescriptor /// } /// ``` @@ -14,13 +14,13 @@ /// /// ## Topics /// -/// ### Creating Tokenizers +/// ### Creating Tokenizer Descriptors /// /// - ``porter`` /// - ``simple`` /// - ``unicode61(diacritics:separators:tokenCharacters:)`` /// - ``FTS3/Diacritics`` -public struct FTS3TokenizerDescriptor { +public struct FTS3TokenizerDescriptor: Sendable { let name: String let arguments: [String] @@ -34,7 +34,7 @@ public struct FTS3TokenizerDescriptor { /// For example: /// /// ```swift - /// db.create(virtualTable: "book", using: FTS4()) { t in + /// try db.create(virtualTable: "book", using: FTS4()) { t in /// t.tokenizer = .simple /// } /// ``` @@ -47,7 +47,7 @@ public struct FTS3TokenizerDescriptor { /// For example: /// /// ```swift - /// db.create(virtualTable: "book", using: FTS4()) { t in + /// try db.create(virtualTable: "book", using: FTS4()) { t in /// t.tokenizer = .porter /// } /// ``` @@ -60,7 +60,7 @@ public struct FTS3TokenizerDescriptor { /// For example: /// /// ```swift - /// db.create(virtualTable: "book", using: FTS4()) { t in + /// try db.create(virtualTable: "book", using: FTS4()) { t in /// t.tokenizer = .unicode61() /// } /// ``` diff --git a/GRDB/FTS/FTS4.swift b/GRDB/FTS/FTS4.swift index ab5cbefefd..ef9a54f5e3 100644 --- a/GRDB/FTS/FTS4.swift +++ b/GRDB/FTS/FTS4.swift @@ -1,7 +1,14 @@ /// The virtual table module for the FTS4 full-text engine. /// /// To create FTS4 tables, use the ``Database`` method -/// ``Database/create(virtualTable:ifNotExists:using:_:)``. 
+/// ``Database/create(virtualTable:ifNotExists:using:_:)``: +/// +/// ```swift +/// // CREATE VIRTUAL TABLE document USING fts4(content) +/// try db.create(virtualTable: "document", using: FTS4()) { t in +/// t.column("content") +/// } +/// ``` /// /// Related SQLite documentation: /// @@ -62,7 +69,7 @@ extension FTS4: VirtualTableModule { switch definition.contentMode { case .raw(let content): - if let content = content { + if let content { arguments.append("content=\"\(content)\"") } case .synchronized(let contentTable): @@ -234,7 +241,7 @@ public final class FTS4TableDefinition { /// The FTS4 `prefix` option. /// /// // CREATE VIRTUAL TABLE document USING FTS4(content, prefix='2 4'); - /// db.create(virtualTable: "document", using:FTS4()) { t in + /// try db.create(virtualTable: "document", using:FTS4()) { t in /// t.prefixes = [2, 4] /// t.column("content") /// } @@ -300,12 +307,17 @@ public final class FTS4TableDefinition { /// try db.dropFTS4SynchronizationTriggers(forTable: "book_ft") /// ``` /// - /// Related SQLite documentation: + /// Related SQLite documentation: public func synchronize(withTable tableName: String) { contentMode = .synchronized(contentTable: tableName) } } +// Explicit non-conformance to Sendable: `FTS4TableDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FTS4TableDefinition: Sendable { } + /// Describes a column in an ``FTS4`` virtual table. /// /// You get instances of `FTS4ColumnDefinition` when you create an ``FTS4`` @@ -370,6 +382,11 @@ public final class FTS4ColumnDefinition { } } +// Explicit non-conformance to Sendable: `FTS4ColumnDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FTS4ColumnDefinition: Sendable { } + extension Database { /// Deletes the synchronization triggers for a synchronized FTS4 table. /// diff --git a/GRDB/FTS/FTS5.swift b/GRDB/FTS/FTS5.swift index 5c1c7fb362..a18da5a3f0 100644 --- a/GRDB/FTS/FTS5.swift +++ b/GRDB/FTS/FTS5.swift @@ -1,20 +1,50 @@ #if SQLITE_ENABLE_FTS5 import Foundation -/// FTS5 lets you define "fts5" virtual tables. +/// The virtual table module for the FTS5 full-text engine. /// -/// // CREATE VIRTUAL TABLE document USING fts5(content) -/// try db.create(virtualTable: "document", using: FTS5()) { t in -/// t.column("content") -/// } +/// To create FTS5 tables, use the ``Database`` method +/// ``Database/create(virtualTable:ifNotExists:using:_:)``: /// -/// See -public struct FTS5: VirtualTableModule { +/// ```swift +/// // CREATE VIRTUAL TABLE document USING fts5(content) +/// try db.create(virtualTable: "document", using: FTS5()) { t in +/// t.column("content") +/// } +/// ``` +/// +/// Related SQLite documentation: +/// +/// ## Topics +/// +/// ### The FTS5 Module +/// +/// - ``init()`` +/// - ``FTS5TableDefinition`` +/// - ``FTS5ColumnDefinition`` +/// - ``FTS5TokenizerDescriptor`` +/// +/// ### Full-Text Search Pattern +/// +/// - ``FTS5Pattern`` +/// +/// ### FTS5 Tokenizers +/// +/// - ``FTS5Tokenizer`` +/// - ``FTS5CustomTokenizer`` +/// - ``FTS5WrapperTokenizer`` +/// - ``FTS5TokenFlags`` +/// - ``FTS5Tokenization`` +/// +/// ### Low-Level FTS5 Customization +/// +/// - ``api(_:)`` +public struct FTS5 { /// Options for Latin script characters. Matches the raw "remove_diacritics" /// tokenizer argument. 
/// - /// See - public enum Diacritics { + /// Related SQLite documentation: + public enum Diacritics: Sendable { /// Do not remove diacritics from Latin script characters. This /// option matches the raw "remove_diacritics=0" tokenizer argument. case keep @@ -30,20 +60,23 @@ public struct FTS5: VirtualTableModule { /// Remove diacritics from Latin script characters. This /// option matches the raw "remove_diacritics=2" tokenizer argument, /// available from SQLite 3.27.0 - @available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) + @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.27+ case remove #endif } - /// Creates a FTS5 module suitable for the Database - /// `create(virtualTable:using:)` method. + /// Creates an FTS5 module. /// - /// // CREATE VIRTUAL TABLE document USING fts5(content) - /// try db.create(virtualTable: "document", using: FTS5()) { t in - /// t.column("content") - /// } + /// For example: + /// + /// ```swift + /// // CREATE VIRTUAL TABLE document USING fts5(content) + /// try db.create(virtualTable: "document", using: FTS5()) { t in + /// t.column("content") + /// } + /// ``` /// - /// See + /// See ``Database/create(virtualTable:ifNotExists:using:_:)`` public init() { } // Support for FTS5Pattern initializers. Don't make public. Users tokenize @@ -71,10 +104,73 @@ public struct FTS5: VirtualTableModule { } } - // MARK: - VirtualTableModule Adoption + /// Returns a pointer to the `fts5_api` structure. + /// + /// Related SQLite documentation: + public static func api(_ db: Database) -> UnsafePointer { + // Access to FTS5 is one of the rare SQLite api which was broken in + // SQLite 3.20.0+, for security reasons: + // + // Starting SQLite 3.20.0+, we need to use the new sqlite3_bind_pointer api. + // The previous way to access FTS5 does not work any longer. + // + // So let's see which SQLite version we are linked against: + + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // GRDB is linked against SQLCipher or a custom SQLite build: SQLite 3.20.0 or more. + return api_v2(db, sqlite3_prepare_v3, sqlite3_bind_pointer) + #else + // GRDB is linked against the system SQLite. + if #available(iOS 12, macOS 10.14, tvOS 12, watchOS 5, *) { // SQLite 3.20+ + return api_v2(db, sqlite3_prepare_v3, sqlite3_bind_pointer) + } else { + return api_v1(db) + } + #endif + } + private static func api_v1(_ db: Database) -> UnsafePointer { + guard let data = try! Data.fetchOne(db, sql: "SELECT fts5()") else { + fatalError("FTS5 is not available") + } + return data.withUnsafeBytes { + $0.bindMemory(to: UnsafePointer.self).first! + } + } + + // Technique given by Jordan Rose: + // https://forums.swift.org/t/c-interoperability-combinations-of-library-and-os-versions/14029/4 + private static func api_v2( + _ db: Database, + // swiftlint:disable:next line_length + _ sqlite3_prepare_v3: @convention(c) (OpaquePointer?, UnsafePointer?, CInt, CUnsignedInt, UnsafeMutablePointer?, UnsafeMutablePointer?>?) -> CInt, + // swiftlint:disable:next line_length + _ sqlite3_bind_pointer: @convention(c) (OpaquePointer?, CInt, UnsafeMutableRawPointer?, UnsafePointer?, (@convention(c) (UnsafeMutableRawPointer?) -> Void)?) -> CInt) + -> UnsafePointer + { + var statement: SQLiteStatement? = nil + var api: UnsafePointer? 
= nil + let type: StaticString = "fts5_api_ptr" + + let code = sqlite3_prepare_v3(db.sqliteConnection, "SELECT fts5(?)", -1, 0, &statement, nil) + guard code == SQLITE_OK else { + fatalError("FTS5 is not available") + } + defer { sqlite3_finalize(statement) } + type.utf8Start.withMemoryRebound(to: CChar.self, capacity: type.utf8CodeUnitCount) { typePointer in + _ = sqlite3_bind_pointer(statement, 1, &api, typePointer, nil) + } + sqlite3_step(statement) + guard let api else { + fatalError("FTS5 is not available") + } + return api + } +} + +extension FTS5: VirtualTableModule { /// The virtual table module name - public let moduleName = "fts5" + public var moduleName: String { "fts5" } /// Reserved; part of the VirtualTableModule protocol. /// @@ -114,11 +210,11 @@ public struct FTS5: VirtualTableModule { switch definition.contentMode { case let .raw(content, contentRowID): - if let content = content { + if let content { let quotedContent = try content.sqlExpression.quotedSQL(db) arguments.append("content=\(quotedContent)") } - if let contentRowID = contentRowID { + if let contentRowID { let quotedContentRowID = try contentRowID.sqlExpression.quotedSQL(db) arguments.append("content_rowid=\(quotedContentRowID)") } @@ -203,82 +299,38 @@ public struct FTS5: VirtualTableModule { try db.execute(sql: "INSERT INTO \(ftsTable)(\(ftsTable)) VALUES('rebuild')") } } - - static func api(_ db: Database) -> UnsafePointer { - // Access to FTS5 is one of the rare SQLite api which was broken in - // SQLite 3.20.0+, for security reasons: - // - // Starting SQLite 3.20.0+, we need to use the new sqlite3_bind_pointer api. - // The previous way to access FTS5 does not work any longer. - // - // So let's see which SQLite version we are linked against: - - #if GRDBCUSTOMSQLITE || GRDBCIPHER - // GRDB is linked against SQLCipher or a custom SQLite build: SQLite 3.20.0 or more. - return api_v2(db, sqlite3_prepare_v3, sqlite3_bind_pointer) - #else - // GRDB is linked against the system SQLite. - // - // Do we use SQLite 3.19.3 (iOS 11.4), or SQLite 3.24.0 (iOS 12.0)? - if #available(iOS 12.0, OSX 10.14, tvOS 12.0, watchOS 5.0, *) { - // SQLite 3.24.0 or more - return api_v2(db, sqlite3_prepare_v3, sqlite3_bind_pointer) - } else { - // SQLite 3.19.3 or less - return api_v1(db) - } - #endif - } - - private static func api_v1(_ db: Database) -> UnsafePointer { - guard let data = try! Data.fetchOne(db, sql: "SELECT fts5()") else { - fatalError("FTS5 is not available") - } - return data.withUnsafeBytes { - $0.bindMemory(to: UnsafePointer.self).first! - } - } - - // Technique given by Jordan Rose: - // https://forums.swift.org/t/c-interoperability-combinations-of-library-and-os-versions/14029/4 - private static func api_v2( - _ db: Database, - // swiftlint:disable:next line_length - _ sqlite3_prepare_v3: @convention(c) (OpaquePointer?, UnsafePointer?, CInt, CUnsignedInt, UnsafeMutablePointer?, UnsafeMutablePointer?>?) -> CInt, - // swiftlint:disable:next line_length - _ sqlite3_bind_pointer: @convention(c) (OpaquePointer?, CInt, UnsafeMutableRawPointer?, UnsafePointer?, (@convention(c) (UnsafeMutableRawPointer?) -> Void)?) -> CInt) - -> UnsafePointer - { - var statement: SQLiteStatement? = nil - var api: UnsafePointer? 
= nil - let type: StaticString = "fts5_api_ptr" - - let code = sqlite3_prepare_v3(db.sqliteConnection, "SELECT fts5(?)", -1, 0, &statement, nil) - guard code == SQLITE_OK else { - fatalError("FTS5 is not available") - } - defer { sqlite3_finalize(statement) } - type.utf8Start.withMemoryRebound(to: Int8.self, capacity: type.utf8CodeUnitCount) { typePointer in - _ = sqlite3_bind_pointer(statement, 1, &api, typePointer, nil) - } - sqlite3_step(statement) - guard let api else { - fatalError("FTS5 is not available") - } - return api - } } -/// The FTS5TableDefinition class lets you define columns of a FTS5 virtual table. +/// A `FTS5TableDefinition` lets you define the components of an FTS5 +/// virtual table. +/// +/// You don't create instances of this class. Instead, you use the `Database` +/// ``Database/create(virtualTable:ifNotExists:using:_:)`` method: +/// +/// ```swift +/// try db.create(virtualTable: "document", using: FTS5()) { t in // t is FTS5TableDefinition +/// t.column("content") +/// } +/// ``` /// -/// You don't create instances of this class. Instead, you use the Database -/// `create(virtualTable:using:)` method: +/// ## Topics /// -/// try db.create(virtualTable: "document", using: FTS5()) { t in // t is FTS5TableDefinition -/// t.column("content") -/// } +/// ### Define Columns /// -/// See +/// - ``column(_:)`` +/// +/// ### External Content Tables +/// +/// - ``synchronize(withTable:)`` +/// +/// ### FTS5 Options +/// +/// - ``columnSize`` +/// - ``content`` +/// - ``contentRowID`` +/// - ``detail`` +/// - ``prefixes`` +/// - ``tokenizer`` public final class FTS5TableDefinition { enum ContentMode { case raw(content: String?, contentRowID: String?) @@ -289,25 +341,30 @@ public final class FTS5TableDefinition { fileprivate var columns: [FTS5ColumnDefinition] = [] fileprivate var contentMode: ContentMode = .raw(content: nil, contentRowID: nil) - /// The virtual table tokenizer + /// The virtual table tokenizer. /// - /// try db.create(virtualTable: "document", using: FTS5()) { t in - /// t.tokenizer = .porter() - /// } + /// For example: /// - /// See + /// ```swift + /// // CREATE VIRTUAL TABLE "documents" USING fts5(tokenize=porter) + /// try db.create(virtualTable: "document", using: FTS5()) { t in + /// t.tokenizer = .porter() + /// } + /// ``` + /// + /// Related SQLite documentation: public var tokenizer: FTS5TokenizerDescriptor? - /// The FTS5 `content` option + /// The FTS5 `content` option. /// /// When you want the full-text table to be synchronized with the - /// content of an external table, prefer the `synchronize(withTable:)` - /// method. + /// content of an external table, prefer the + /// ``synchronize(withTable:)`` method. /// /// Setting this property invalidates any synchronization previously - /// established with the `synchronize(withTable:)` method. + /// established with the ``synchronize(withTable:)`` method. /// - /// See + /// Related SQLite documentation: public var content: String? { get { switch contentMode { @@ -330,13 +387,13 @@ public final class FTS5TableDefinition { /// The FTS5 `content_rowid` option /// /// When you want the full-text table to be synchronized with the - /// content of an external table, prefer the `synchronize(withTable:)` - /// method. + /// content of an external table, prefer the + /// ``synchronize(withTable:)`` method. /// /// Setting this property invalidates any synchronization previously - /// established with the `synchronize(withTable:)` method. + /// established with the ``synchronize(withTable:)`` method. 
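+    ///
+    /// As a hedged sketch (assuming a `document` table with an `id`
+    /// primary key), both options can be set by hand:
+    ///
+    /// ```swift
+    /// // fts5(content, content='document', content_rowid='id')
+    /// try db.create(virtualTable: "document_ft", using: FTS5()) { t in
+    ///     t.column("content")
+    ///     t.content = "document"
+    ///     t.contentRowID = "id"
+    /// }
+    /// ```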
/// - /// See + /// Related SQLite documentation: public var contentRowID: String? { get { switch contentMode { @@ -356,19 +413,19 @@ public final class FTS5TableDefinition { } } - /// Support for the FTS5 `prefix` option + /// The FTS5 `prefix` option. /// - /// See + /// Related SQLite documentation: public var prefixes: Set? - /// Support for the FTS5 `columnsize` option + /// The FTS5 `columnsize` option. /// - /// + /// Related SQLite documentation: public var columnSize: Int? - /// Support for the FTS5 `detail` option + /// The FTS5 `detail` option. /// - /// + /// Related SQLite documentation: public var detail: String? init(configuration: VirtualTableConfiguration) { @@ -377,11 +434,18 @@ public final class FTS5TableDefinition { /// Appends a table column. /// - /// try db.create(virtualTable: "document", using: FTS5()) { t in - /// t.column("content") - /// } + /// For example: + /// + /// ```swift + /// // CREATE VIRTUAL TABLE document USING fts5(content) + /// try db.create(virtualTable: "document", using: FTS5()) { t in + /// t.column("content") + /// } + /// ``` /// /// - parameter name: the column name. + /// - returns: A ``FTS5ColumnDefinition`` that allows you to refine the + /// column definition. @discardableResult public func column(_ name: String) -> FTS5ColumnDefinition { let column = FTS5ColumnDefinition(name: name) @@ -396,22 +460,55 @@ public final class FTS5TableDefinition { /// content in the external table. SQL triggers make sure that the /// full-text table is kept up to date with the external table. /// - /// See + /// SQLite automatically deletes those triggers when the content + /// (not full-text) table is dropped. + /// + /// However, those triggers remain after the full-text table has been + /// dropped. Unless they are dropped too, they will prevent future + /// insertion, updates, and deletions in the content table, and the creation + /// of a new full-text table. + /// + /// To drop those triggers, call the `Database` + /// ``Database/dropFTS5SynchronizationTriggers(forTable:)`` method: + /// + /// ```swift + /// // Create tables + /// try db.create(table: "book") { t in + /// ... + /// } + /// try db.create(virtualTable: "book_ft", using: FTS5()) { t in + /// t.synchronize(withTable: "book") + /// ... + /// } + /// + /// // Drop full-text table + /// try db.drop(table: "book_ft") + /// try db.dropFTS5SynchronizationTriggers(forTable: "book_ft") + /// ``` + /// + /// Related SQLite documentation: public func synchronize(withTable tableName: String) { contentMode = .synchronized(contentTable: tableName) } } -/// The FTS5ColumnDefinition class lets you refine a column of an FTS5 -/// virtual table. +// Explicit non-conformance to Sendable: `FTS5TableDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FTS5TableDefinition: Sendable { } + +/// Describes a column in an ``FTS5`` virtual table. /// -/// You get instances of this class when you create an FTS5 table: +/// You get instances of `FTS5ColumnDefinition` when you create an ``FTS5`` +/// virtual table. 
For example: /// -/// try db.create(virtualTable: "document", using: FTS5()) { t in -/// t.column("content") // FTS5ColumnDefinition -/// } +/// ```swift +/// try db.create(virtualTable: "document", using: FTS5()) { t in +/// t.column("content") // FTS5ColumnDefinition +/// } +/// ``` /// -/// See +/// Related SQLite documentation: public final class FTS5ColumnDefinition { fileprivate let name: String fileprivate var isIndexed: Bool @@ -423,14 +520,18 @@ public final class FTS5ColumnDefinition { /// Excludes the column from the full-text index. /// - /// try db.create(virtualTable: "document", using: FTS5()) { t in - /// t.column("a") - /// t.column("b").notIndexed() - /// } + /// For example: + /// + /// ```swift + /// try db.create(virtualTable: "document", using: FTS5()) { t in + /// t.column("a") + /// t.column("b").notIndexed() + /// } + /// ``` /// - /// See + /// Related SQLite documentation: /// - /// - returns: Self so that you can further refine the column definition. + /// - returns: `self` so that you can further refine the column definition. @discardableResult public func notIndexed() -> Self { self.isIndexed = false @@ -438,8 +539,13 @@ public final class FTS5ColumnDefinition { } } +// Explicit non-conformance to Sendable: `FTS5ColumnDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension FTS5ColumnDefinition: Sendable { } + extension Column { - /// The FTS5 rank column + /// The ``FTS5`` rank column. public static let rank = Column("rank") } diff --git a/GRDB/FTS/FTS5CustomTokenizer.swift b/GRDB/FTS/FTS5CustomTokenizer.swift index 313d2e850a..a294c35b3e 100644 --- a/GRDB/FTS/FTS5CustomTokenizer.swift +++ b/GRDB/FTS/FTS5CustomTokenizer.swift @@ -1,9 +1,13 @@ #if SQLITE_ENABLE_FTS5 - -/// The protocol for custom FTS5 tokenizers. +/// A type that implements a custom tokenizer for the ``FTS5`` full-text engine. +/// +/// See [FTS5 Tokenizers](https://github.com/groue/GRDB.swift/blob/master/Documentation/FTS5Tokenizers.md) +/// for more information. public protocol FTS5CustomTokenizer: FTS5Tokenizer { - /// The name of the tokenizer; should uniquely identify your custom - /// tokenizer. + /// The name of the tokenizer. + /// + /// The name should uniquely identify the tokenizer: don't use a built-in + /// name such as `ascii`, `porter` or `unicode61`. static var name: String { get } /// Creates a custom tokenizer. @@ -27,7 +31,7 @@ extension FTS5CustomTokenizer { /// /// class MyTokenizer : FTS5CustomTokenizer { ... } /// - /// db.create(virtualTable: "book", using: FTS5()) { t in + /// try db.create(virtualTable: "book", using: FTS5()) { t in /// let tokenizer = MyTokenizer.tokenizerDescriptor(arguments: ["unicode61", "remove_diacritics", "0"]) /// t.tokenizer = tokenizer /// } @@ -57,7 +61,7 @@ extension Database { /// /// class MyTokenizer : FTS5CustomTokenizer { ... 
} /// db.add(tokenizer: MyTokenizer.self) - public func add(tokenizer: Tokenizer.Type) { + public func add(tokenizer: (some FTS5CustomTokenizer).Type) { let api = FTS5.api(self) // Swift won't let the @convention(c) xCreate() function below create @@ -72,7 +76,7 @@ extension Database { return SQLITE_ERROR } do { - let tokenizer = try Tokenizer(db: db, arguments: arguments) + let tokenizer = try tokenizer.init(db: db, arguments: arguments) // Tokenizer must remain alive until xDeleteTokenizer() // is called, as the xDelete member of xTokenizer @@ -108,7 +112,7 @@ extension Database { } let constructor = Unmanaged.fromOpaque(constructorPointer).takeUnretainedValue() var arguments: [String] = [] - if let azArg = azArg { + if let azArg { for i in 0..?, + pText: UnsafePointer?, nText: CInt, // swiftlint:disable:next line_length - tokenCallback: (@convention(c) (UnsafeMutableRawPointer?, CInt, UnsafePointer?, CInt, CInt, CInt) -> CInt)?) + tokenCallback: (@convention(c) (UnsafeMutableRawPointer?, CInt, UnsafePointer?, CInt, CInt, CInt) -> CInt)?) -> CInt { guard let tokenizerPointer else { @@ -154,7 +158,7 @@ extension Database { let code = withUnsafeMutablePointer(to: &xTokenizer) { xTokenizerPointer in api.pointee.xCreateTokenizer( UnsafeMutablePointer(mutating: api), - Tokenizer.name, + tokenizer.name, constructorPointer, xTokenizerPointer, deleteConstructor) diff --git a/GRDB/FTS/FTS5Pattern.swift b/GRDB/FTS/FTS5Pattern.swift index c9ac77aa16..045a409a1b 100644 --- a/GRDB/FTS/FTS5Pattern.swift +++ b/GRDB/FTS/FTS5Pattern.swift @@ -1,17 +1,40 @@ #if SQLITE_ENABLE_FTS5 -/// A full text pattern for querying FTS5 virtual tables. -public struct FTS5Pattern { +/// A full text pattern for querying ``FTS5`` virtual tables. +/// +/// Related SQLite documentation: +/// +/// ## Topics +/// +/// ### Creating Raw FTS5 Patterns +/// +/// - ``Database/makeFTS5Pattern(rawPattern:forTable:)`` +/// +/// ### Creating FTS5 Patterns from User Input +/// +/// - ``init(matchingAllPrefixesIn:)`` +/// - ``init(matchingAllTokensIn:)`` +/// - ``init(matchingAnyTokenIn:)`` +/// - ``init(matchingPhrase:)`` +/// - ``init(matchingPrefixPhrase:)`` +public struct FTS5Pattern: Sendable { - /// The raw pattern string. Guaranteed to be a valid FTS5 pattern. + /// The raw pattern string. + /// + /// It is guaranteed to be a valid FTS5 pattern. public let rawPattern: String - /// Creates a pattern that matches any token found in the input string; - /// returns nil if no pattern could be built. + /// Creates a pattern that matches any token found in the input string. /// - /// FTS5Pattern(matchingAnyTokenIn: "") // nil - /// FTS5Pattern(matchingAnyTokenIn: "foo bar") // foo OR bar + /// The result is nil if no pattern could be built. /// - /// - parameter string: The string to turn into an FTS5 pattern + /// For example: + /// + /// ```swift + /// FTS5Pattern(matchingAnyTokenIn: "") // nil + /// FTS5Pattern(matchingAnyTokenIn: "foo bar") // foo OR bar + /// ``` + /// + /// - parameter string: The string to turn into an FTS5 pattern. public init?(matchingAnyTokenIn string: String) { guard let tokens = try? FTS5.tokenize(query: string), !tokens.isEmpty else { return nil @@ -19,13 +42,18 @@ public struct FTS5Pattern { try? self.init(rawPattern: tokens.joined(separator: " OR ")) } - /// Creates a pattern that matches all tokens found in the input string; - /// returns nil if no pattern could be built. + /// Creates a pattern that matches all tokens found in the input string. 
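+    ///
+    /// Such patterns are typically used to filter a full-text table. A
+    /// hedged sketch, assuming a `Document` record backed by an FTS5 table:
+    ///
+    /// ```swift
+    /// // `Document` and `searchText` are hypothetical.
+    /// if let pattern = FTS5Pattern(matchingAllTokensIn: searchText) {
+    ///     let documents = try Document.matching(pattern).fetchAll(db)
+    /// }
+    /// ```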
    ///
-    ///     FTS5Pattern(matchingAllTokensIn: "")        // nil
-    ///     FTS5Pattern(matchingAllTokensIn: "foo bar") // foo bar
+    /// The result is nil if no pattern could be built.
    ///
-    /// - parameter string: The string to turn into an FTS5 pattern
+    /// For example:
+    ///
+    /// ```swift
+    /// FTS5Pattern(matchingAllTokensIn: "")        // nil
+    /// FTS5Pattern(matchingAllTokensIn: "foo bar") // foo bar
+    /// ```
+    ///
+    /// - parameter string: The string to turn into an FTS5 pattern.
    public init?(matchingAllTokensIn string: String) {
        guard let tokens = try? FTS5.tokenize(query: string), !tokens.isEmpty else {
            return nil
@@ -34,12 +62,18 @@ public struct FTS5Pattern {
    }
    /// Creates a pattern that matches all token prefixes found in the input
-    /// string; returns nil if no pattern could be built.
+    /// string.
+    ///
+    /// The result is nil if no pattern could be built.
    ///
-    ///     FTS3Pattern(matchingAllTokensIn: "")        // nil
-    ///     FTS3Pattern(matchingAllTokensIn: "foo bar") // foo* bar*
+    /// For example:
    ///
-    /// - parameter string: The string to turn into an FTS3 pattern
+    /// ```swift
+    /// FTS5Pattern(matchingAllPrefixesIn: "")        // nil
+    /// FTS5Pattern(matchingAllPrefixesIn: "foo bar") // foo* bar*
+    /// ```
+    ///
+    /// - parameter string: The string to turn into an FTS5 pattern.
    public init?(matchingAllPrefixesIn string: String) {
        guard let tokens = try? FTS5.tokenize(query: string), !tokens.isEmpty else {
            return nil
@@ -47,13 +81,18 @@ public struct FTS5Pattern {
        }
        try? self.init(rawPattern: tokens.map { "\($0)*" }.joined(separator: " "))
    }
-    /// Creates a pattern that matches a contiguous string; returns nil if no
-    /// pattern could be built.
+    /// Creates a pattern that matches a contiguous string.
    ///
-    ///     FTS5Pattern(matchingPhrase: "")        // nil
-    ///     FTS5Pattern(matchingPhrase: "foo bar") // "foo bar"
+    /// The result is nil if no pattern could be built.
    ///
-    /// - parameter string: The string to turn into an FTS5 pattern
+    /// For example:
+    ///
+    /// ```swift
+    /// FTS5Pattern(matchingPhrase: "")        // nil
+    /// FTS5Pattern(matchingPhrase: "foo bar") // "foo bar"
+    /// ```
+    ///
+    /// - parameter string: The string to turn into an FTS5 pattern.
    public init?(matchingPhrase string: String) {
        guard let tokens = try? FTS5.tokenize(query: string), !tokens.isEmpty else {
            return nil
@@ -61,14 +100,20 @@ public struct FTS5Pattern {
        }
        try? self.init(rawPattern: "\"" + tokens.joined(separator: " ") + "\"")
    }
-    /// Creates a pattern that matches the prefix of an indexed document;
-    /// returns nil if no pattern could be built.
+    /// Creates a pattern that matches the prefix of an indexed document.
    ///
-    ///     FTS5Pattern(matchingPrefixPhrase: "")         // nil
-    ///     FTS5Pattern(matchingPrefixPhrase: "the word") // ^"the word"
+    /// The result is nil if no pattern could be built.
    ///
-    /// This pattern matches a prefix made of full tokens: "the bat" matches
-    /// "the bat is happy", but not "mind the bat", or "the batcave is dark".
+    /// The returned pattern matches a prefix made of full tokens: "the bat"
+    /// matches "the bat is happy", but not "mind the bat", or "the batcave
+    /// is dark".
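+    ///
+    /// In the raw pattern, this anchoring is expressed by the leading `^`
+    /// character before the quoted phrase.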
+ /// + /// For example: + /// + /// ```swift + /// FTS5Pattern(matchingPrefixPhrase: "") // nil + /// FTS5Pattern(matchingPrefixPhrase: "the word") // ^"the word" + /// ``` /// /// - parameter string: The string to turn into an FTS5 pattern public init?(matchingPrefixPhrase string: String) { @@ -113,13 +158,26 @@ extension Database { // MARK: - FTS5 - /// Creates a pattern from a raw pattern string; throws DatabaseError on - /// invalid syntax. + /// Creates an FTS5 pattern from a raw pattern string. + /// + /// For example: + /// + /// ```swift + /// try dbQueue.read { db in + /// // OK + /// let pattern = try db.makeFTS5Pattern(rawPattern: "and", forTable: "document") /// - /// The pattern syntax is documented at + /// // Throws error: malformed MATCH expression: [AND] + /// let pattern = try db.makeFTS5Pattern(rawPattern: "AND", forTable: "document") + /// } + /// ``` /// - /// try db.makeFTS5Pattern(rawPattern: "and", forTable: "document") // OK - /// try db.makeFTS5Pattern(rawPattern: "AND", forTable: "document") // malformed MATCH expression: [AND] + /// - parameter rawPattern: A pattern that follows the + /// [Full-text Query Syntax](https://www.sqlite.org/fts5.html#full_text_query_syntax). + /// - parameter table: The full-text table that the pattern is intended to + /// match against. + /// - returns: A valid FTS5 pattern. + /// - throws: A ``DatabaseError`` if the raw pattern is invalid. public func makeFTS5Pattern(rawPattern: String, forTable table: String) throws -> FTS5Pattern { try FTS5Pattern(rawPattern: rawPattern, allowedColumns: columns(in: table).map(\.name)) } diff --git a/GRDB/FTS/FTS5Tokenizer.swift b/GRDB/FTS/FTS5Tokenizer.swift index 74d34b6c96..8609a5ad72 100644 --- a/GRDB/FTS/FTS5Tokenizer.swift +++ b/GRDB/FTS/FTS5Tokenizer.swift @@ -3,11 +3,11 @@ import Foundation /// A low-level SQLite function that lets FTS5Tokenizer notify tokens. /// -/// See FTS5Tokenizer.tokenize(context:flags:pText:nText:tokenCallback:) +/// See ``FTS5Tokenizer/tokenize(context:tokenization:pText:nText:tokenCallback:)``. public typealias FTS5TokenCallback = @convention(c) ( _ context: UnsafeMutableRawPointer?, _ flags: CInt, - _ pToken: UnsafePointer?, + _ pToken: UnsafePointer?, _ nToken: CInt, _ iStart: CInt, _ iEnd: CInt) @@ -15,28 +15,50 @@ public typealias FTS5TokenCallback = @convention(c) ( /// The reason why FTS5 is requesting tokenization. /// -/// See -public struct FTS5Tokenization: OptionSet { +/// See the `FTS5_TOKENIZE_*` constants in . +public struct FTS5Tokenization: OptionSet, Sendable { public let rawValue: CInt public init(rawValue: CInt) { self.rawValue = rawValue } - /// FTS5_TOKENIZE_QUERY + /// `FTS5_TOKENIZE_QUERY` public static let query = FTS5Tokenization(rawValue: FTS5_TOKENIZE_QUERY) - /// FTS5_TOKENIZE_PREFIX + /// `FTS5_TOKENIZE_PREFIX` public static let prefix = FTS5Tokenization(rawValue: FTS5_TOKENIZE_PREFIX) - /// FTS5_TOKENIZE_DOCUMENT + /// `FTS5_TOKENIZE_DOCUMENT` public static let document = FTS5Tokenization(rawValue: FTS5_TOKENIZE_DOCUMENT) - /// FTS5_TOKENIZE_AUX + /// `FTS5_TOKENIZE_AUX` public static let aux = FTS5Tokenization(rawValue: FTS5_TOKENIZE_AUX) } -/// The protocol for FTS5 tokenizers +/// A type that implements a tokenizer for the ``FTS5`` full-text engine. 
+/// +/// You can instantiate tokenizers, including +/// [built-in tokenizers](https://www.sqlite.org/fts5.html#tokenizers), +/// with the ``Database/makeTokenizer(_:)`` method: +/// +/// ```swift +/// try dbQueue.read { db in +/// let unicode61 = try db.makeTokenizer(.unicode61()) // FTS5Tokenizer +/// } +/// ``` +/// +/// See [FTS5 Tokenizers](https://github.com/groue/GRDB.swift/blob/master/Documentation/FTS5Tokenizers.md) +/// for more information. +/// +/// ## Topics +/// +/// ### Tokenizing Text +/// +/// - ``tokenize(document:)`` +/// - ``tokenize(query:)`` +/// - ``tokenize(context:tokenization:pText:nText:tokenCallback:)`` +/// - ``FTS5TokenCallback`` public protocol FTS5Tokenizer: AnyObject { /// Tokenizes the text described by `pText` and `nText`, and /// notifies found tokens to the `tokenCallback` function. @@ -55,7 +77,7 @@ public protocol FTS5Tokenizer: AnyObject { func tokenize( context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, - pText: UnsafePointer?, + pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt @@ -72,8 +94,10 @@ extension FTS5Tokenizer { /// /// For example: /// - /// let tokenizer = try db.makeTokenizer(.ascii()) - /// try tokenizer.tokenize(document: "foo bar") // [("foo", flags), ("bar", flags)] + /// ```swift + /// let tokenizer = try db.makeTokenizer(.ascii()) + /// try tokenizer.tokenize(document: "foo bar") // [("foo", flags), ("bar", flags)] + /// ``` /// /// See also `tokenize(query:)`. /// @@ -88,8 +112,10 @@ extension FTS5Tokenizer { /// /// For example: /// - /// let tokenizer = try db.makeTokenizer(.ascii()) - /// try tokenizer.tokenize(query: "foo bar") // [("foo", flags), ("bar", flags)] + /// ```swift + /// let tokenizer = try db.makeTokenizer(.ascii()) + /// try tokenizer.tokenize(query: "foo bar") // [("foo", flags), ("bar", flags)] + /// ``` /// /// See also `tokenize(document:)`. /// @@ -117,7 +143,7 @@ extension FTS5Tokenizer { guard let addr = buffer.baseAddress else { return [] } - let pText = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: Int8.self) + let pText = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: CChar.self) let nText = CInt(buffer.count) var context = TokenizeContext() @@ -180,8 +206,8 @@ extension Database { } else { func withArrayOfCStrings( _ input: [String], - _ output: inout ContiguousArray>, - _ accessor: (ContiguousArray>) -> Result) + _ output: inout ContiguousArray>, + _ accessor: (ContiguousArray>) -> Result) -> Result { if output.count == input.count { @@ -193,7 +219,7 @@ extension Database { } } } - var cStrings = ContiguousArray>() + var cStrings = ContiguousArray>() cStrings.reserveCapacity(arguments.count) code = withArrayOfCStrings(arguments, &cStrings) { (cStrings) in cStrings.withUnsafeBufferPointer { azArg in @@ -210,7 +236,7 @@ extension Database { throw DatabaseError(resultCode: code, message: "failed fts5_tokenizer.xCreate") } - if let tokenizerPointer = tokenizerPointer { + if let tokenizerPointer { self.tokenizerPointer = tokenizerPointer } else { throw DatabaseError(resultCode: code, message: "nil tokenizer") @@ -226,7 +252,7 @@ extension Database { func tokenize( context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, - pText: UnsafePointer?, + pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt @@ -240,20 +266,27 @@ extension Database { /// Creates an FTS5 tokenizer, given its descriptor. 
    ///
-    ///     let unicode61 = try db.makeTokenizer(.unicode61())
+    /// For example:
    ///
-    /// It is a programmer error to use the tokenizer outside of a protected
-    /// database queue, or after the database has been closed.
+    /// ```swift
+    /// let unicode61 = try db.makeTokenizer(.unicode61())
+    /// ```
    ///
-    /// Use this method when you implement a custom wrapper tokenizer:
+    /// You can use this method when you implement a custom wrapper tokenizer
+    /// with ``FTS5WrapperTokenizer``:
    ///
-    ///     final class MyTokenizer : FTS5WrapperTokenizer {
-    ///         var wrappedTokenizer: FTS5Tokenizer
+    /// ```swift
+    /// final class MyTokenizer : FTS5WrapperTokenizer {
+    ///     var wrappedTokenizer: FTS5Tokenizer
    ///
-    ///         init(db: Database, arguments: [String]) throws {
-    ///             wrappedTokenizer = try db.makeTokenizer(.unicode61())
-    ///         }
+    ///     init(db: Database, arguments: [String]) throws {
+    ///         wrappedTokenizer = try db.makeTokenizer(.unicode61())
    ///     }
+    /// }
+    /// ```
+    ///
+    /// It is a programmer error to use the tokenizer outside of a protected
+    /// database queue, or after the database has been closed.
    public func makeTokenizer(_ descriptor: FTS5TokenizerDescriptor) throws -> any FTS5Tokenizer {
        let api = FTS5.api(self)
diff --git a/GRDB/FTS/FTS5TokenizerDescriptor.swift b/GRDB/FTS/FTS5TokenizerDescriptor.swift
index 83d746f8a7..9750aa76fb 100644
--- a/GRDB/FTS/FTS5TokenizerDescriptor.swift
+++ b/GRDB/FTS/FTS5TokenizerDescriptor.swift
@@ -1,61 +1,109 @@
#if SQLITE_ENABLE_FTS5
-/// An FTS5 tokenizer, suitable for FTS5 table definitions:
+/// The descriptor for an ``FTS5`` tokenizer.
///
-///     db.create(virtualTable: "book", using: FTS5()) { t in
-///         t.tokenizer = .unicode61() // FTS5TokenizerDescriptor
-///     }
+/// For example:
///
-/// See
-public struct FTS5TokenizerDescriptor {
-    /// The tokenizer components
+/// ```swift
+/// try db.create(virtualTable: "book", using: FTS5()) { t in
+///     t.tokenizer = .unicode61() // FTS5TokenizerDescriptor
+/// }
+/// ```
+///
+/// Related SQLite documentation:
+///
+/// ## Topics
+///
+/// ### Creating Tokenizer Descriptors
+///
+/// - ``init(components:)``
+/// - ``ascii(separators:tokenCharacters:)``
+/// - ``porter(wrapping:)``
+/// - ``unicode61(diacritics:categories:separators:tokenCharacters:)``
+/// - ``FTS5/Diacritics``
+///
+/// ### Instantiating Tokenizers
+///
+/// - ``Database/makeTokenizer(_:)``
+public struct FTS5TokenizerDescriptor: Sendable {
+    /// The tokenizer components.
+    ///
+    /// For example:
    ///
-    ///     // ["unicode61"]
-    ///     FTS5TokenizerDescriptor.unicode61().components
+    /// ```swift
+    /// // ["unicode61"]
+    /// FTS5TokenizerDescriptor.unicode61().components
    ///
-    ///     // ["unicode61", "remove_diacritics", "0"]
-    ///     FTS5TokenizerDescriptor.unicode61(removeDiacritics: false)).components
+    /// // ["unicode61", "remove_diacritics", "0"]
+    /// FTS5TokenizerDescriptor.unicode61(diacritics: .keep).components
+    /// ```
    public let components: [String]
-    /// The tokenizer name
+    /// The tokenizer name.
+    ///
+    /// For example:
    ///
-    ///     // "unicode61"
-    ///     FTS5TokenizerDescriptor.unicode61().name
+    /// ```swift
+    /// // "unicode61"
+    /// FTS5TokenizerDescriptor.unicode61().name
    ///
-    ///     // "unicode61"
-    ///     FTS5TokenizerDescriptor.unicode61(removeDiacritics: false)).name
+    /// // "unicode61"
+    /// FTS5TokenizerDescriptor.unicode61(diacritics: .keep).name
+    /// ```
    var name: String { components[0] }
+    /// The tokenizer arguments.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // []
+    /// FTS5TokenizerDescriptor.unicode61().arguments
+    ///
+    /// // ["remove_diacritics", "0"]
+    /// FTS5TokenizerDescriptor.unicode61(diacritics: .keep).arguments
+    /// ```
    var arguments: [String] { Array(components.suffix(from: 1)) }
    /// Creates an FTS5 tokenizer descriptor.
    ///
-    ///     db.create(virtualTable: "book", using: FTS5()) { t in
-    ///         let tokenizer = FTS5TokenizerDescriptor(components: ["porter", "unicode61", "remove_diacritics", "0"])
-    ///         t.tokenizer = tokenizer
-    ///     }
+    /// For example:
+    ///
+    /// ```swift
+    /// try db.create(virtualTable: "book", using: FTS5()) { t in
+    ///     t.tokenizer = FTS5TokenizerDescriptor(components: [
+    ///         "porter",
+    ///         "unicode61",
+    ///         "remove_diacritics",
+    ///         "0"])
+    /// }
+    /// ```
    ///
-    /// - precondition: Components is not empty
+    /// - precondition: `components` is not empty.
    public init(components: [String]) {
        GRDBPrecondition(!components.isEmpty, "FTS5TokenizerDescriptor requires at least one component")
        assert(!components.isEmpty)
        self.components = components
    }
-    /// The "ascii" tokenizer
+    /// The "ascii" tokenizer.
+    ///
+    /// For example:
    ///
-    ///     db.create(virtualTable: "book", using: FTS5()) { t in
-    ///         t.tokenizer = .ascii()
-    ///     }
+    /// ```swift
+    /// try db.create(virtualTable: "book", using: FTS5()) { t in
+    ///     t.tokenizer = .ascii()
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation:
    ///
    /// - parameters:
    ///     - separators: Unless empty (the default), SQLite will consider
    ///       these characters as token separators.
    ///     - tokenCharacters: Unless empty (the default), SQLite will
    ///       consider these characters as token characters.
-    ///
-    /// See
    public static func ascii(
        separators: Set<Character> = [],
        tokenCharacters: Set<Character> = [])
@@ -78,30 +126,39 @@ public struct FTS5TokenizerDescriptor {
        return FTS5TokenizerDescriptor(components: components)
    }
-    /// The "porter" tokenizer
+    /// The "porter" tokenizer.
    ///
-    ///     db.create(virtualTable: "book", using: FTS5()) { t in
-    ///         t.tokenizer = .porter()
-    ///     }
+    /// For example:
    ///
-    /// - parameters:
-    ///     - base: An eventual wrapping tokenizer which replaces the
-    //        default unicode61() base tokenizer.
+    /// ```swift
+    /// try db.create(virtualTable: "book", using: FTS5()) { t in
+    ///     t.tokenizer = .porter()
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation:
    ///
-    /// See
+    /// - parameter base: An optional wrapping tokenizer that replaces the
+    ///   default unicode61() base tokenizer.
    public static func porter(wrapping base: FTS5TokenizerDescriptor? = nil) -> FTS5TokenizerDescriptor {
-        if let base = base {
+        if let base {
            return FTS5TokenizerDescriptor(components: ["porter"] + base.components)
        } else {
            return FTS5TokenizerDescriptor(components: ["porter"])
        }
    }
-    /// An "unicode61" tokenizer
+    /// The "unicode61" tokenizer.
+    ///
+    /// For example:
    ///
-    ///     db.create(virtualTable: "book", using: FTS5()) { t in
-    ///         t.tokenizer = .unicode61()
-    ///     }
+    /// ```swift
+    /// try db.create(virtualTable: "book", using: FTS5()) { t in
+    ///     t.tokenizer = .unicode61()
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation:
    ///
    /// - parameters:
    ///     - diacritics: By default SQLite will strip diacritics from
@@ -112,8 +169,6 @@ public struct FTS5TokenizerDescriptor {
    ///       these characters as token separators.
    ///     - tokenCharacters: Unless empty (the default), SQLite will
    ///       consider these characters as token characters.
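+    ///
+    /// A hedged sketch combining these parameters:
+    ///
+    /// ```swift
+    /// // tokenize: unicode61 remove_diacritics 0 separators X
+    /// try db.create(virtualTable: "book", using: FTS5()) { t in
+    ///     t.tokenizer = .unicode61(diacritics: .keep, separators: ["X"])
+    /// }
+    /// ```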
-    ///
-    /// See
    public static func unicode61(
        diacritics: FTS5.Diacritics = .removeLegacy,
        categories: String = "",
diff --git a/GRDB/FTS/FTS5WrapperTokenizer.swift b/GRDB/FTS/FTS5WrapperTokenizer.swift
index 4f804dc5e6..16ac1e1f96 100644
--- a/GRDB/FTS/FTS5WrapperTokenizer.swift
+++ b/GRDB/FTS/FTS5WrapperTokenizer.swift
@@ -3,15 +3,15 @@ import Foundation
/// Flags that tell SQLite how to register a token.
///
-/// See the `FTS5_TOKEN_*` flags in .
-public struct FTS5TokenFlags: OptionSet {
+/// See the `FTS5_TOKEN_*` constants in .
+public struct FTS5TokenFlags: OptionSet, Sendable {
    public let rawValue: CInt
    public init(rawValue: CInt) {
        self.rawValue = rawValue
    }
-    /// FTS5_TOKEN_COLOCATED
+    /// `FTS5_TOKEN_COLOCATED`
    public static let colocated = FTS5TokenFlags(rawValue: FTS5_TOKEN_COLOCATED)
}
@@ -20,34 +20,18 @@ public struct FTS5TokenFlags: OptionSet {
/// See FTS5WrapperTokenizer.accept(token:flags:tokenCallback:)
public typealias FTS5WrapperTokenCallback = (_ token: String, _ flags: FTS5TokenFlags) throws -> Void
-/// The protocol for custom FTS5 tokenizers that wrap another tokenizer.
+/// A type that implements a custom tokenizer for the ``FTS5`` full-text engine
+/// by wrapping another tokenizer.
///
-/// Types that adopt FTS5WrapperTokenizer don't have to implement the
-/// low-level FTS5Tokenizer.tokenize(context:flags:pText:nText:tokenCallback:).
+/// See [FTS5 Tokenizers](https://github.com/groue/GRDB.swift/blob/master/Documentation/FTS5Tokenizers.md)
+/// for more information.
///
-/// Instead, they process regular Swift strings.
+/// ## Topics
///
-/// Here is the implementation for a trivial tokenizer that wraps the
-/// built-in ascii tokenizer without any custom processing:
+/// ### Tokenizing Text
///
-///     class TrivialAsciiTokenizer : FTS5WrapperTokenizer {
-///         static let name = "trivial"
-///         let wrappedTokenizer: FTS5Tokenizer
-///
-///         init(db: Database, arguments: [String]) throws {
-///             wrappedTokenizer = try db.makeTokenizer(.ascii())
-///         }
-///
-///         func accept(
-///             token: String,
-///             flags: FTS5TokenFlags,
-///             for tokenization: FTS5Tokenization,
-///             tokenCallback: FTS5WrapperTokenCallback)
-///             throws
-///         {
-///             try tokenCallback(token, flags)
-///         }
-///     }
+/// - ``accept(token:flags:for:tokenCallback:)``
+/// - ``FTS5WrapperTokenCallback``
public protocol FTS5WrapperTokenizer: FTS5CustomTokenizer {
    /// The wrapped tokenizer
    var wrappedTokenizer: any FTS5Tokenizer { get }
@@ -57,16 +41,17 @@ public protocol FTS5WrapperTokenizer: FTS5CustomTokenizer {
    ///
    /// For example:
    ///
-    ///     func accept(
-    ///         token: String,
-    ///         flags: FTS5TokenFlags,
-    ///         for tokenization: FTS5Tokenization,
-    ///         tokenCallback: FTS5WrapperTokenCallback)
-    ///         throws
-    ///     {
-    ///         // pass through:
-    ///         try tokenCallback(token, flags)
-    ///     }
+    /// ```swift
+    /// func accept(
+    ///     token: String,
+    ///     flags: FTS5TokenFlags,
+    ///     for tokenization: FTS5Tokenization,
+    ///     tokenCallback: FTS5WrapperTokenCallback
+    /// ) throws {
+    ///     // pass through:
+    ///     try tokenCallback(token, flags)
+    /// }
+    /// ```
    ///
    /// When implementing the accept method, there are two rules
    /// to observe:
    ///
    /// 1. Errors thrown by the tokenCallback function must not be caught.
    ///
    /// 2.
The input `flags` should be given unmodified to the tokenCallback - /// function, unless you union it with the .colocated flag when the - /// tokenizer produces synonyms (see + /// function, unless you union it with the ``FTS5TokenFlags/colocated`` flag + /// when the tokenizer produces synonyms (see /// ). /// /// - parameters: @@ -99,11 +84,10 @@ private struct FTS5WrapperContext { } extension FTS5WrapperTokenizer { - /// Default implementation public func tokenize( context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, - pText: UnsafePointer?, + pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt @@ -154,7 +138,7 @@ extension FTS5WrapperTokenizer { return } let pToken = UnsafeMutableRawPointer(mutating: addr) - .assumingMemoryBound(to: Int8.self) + .assumingMemoryBound(to: CChar.self) let nToken = CInt(buffer.count) // Inject token bytes into SQLite diff --git a/GRDB/Fixits.swift b/GRDB/Fixits.swift index 8ff802b97a..ed82d514de 100644 --- a/GRDB/Fixits.swift +++ b/GRDB/Fixits.swift @@ -57,7 +57,7 @@ extension DatabaseCursor { extension DatabaseMigrator { @available(*, unavailable, message: "The completion function now accepts one Result argument") public func asyncMigrate( - _ writer: DatabaseWriter, + _ writer: any DatabaseWriter, completion: @escaping (Database, Error?) -> Void) { preconditionFailure() } } @@ -119,7 +119,7 @@ extension PersistableRecord { public func performSave(_ db: Database) throws { preconditionFailure() } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension QueryInterfaceRequest where RowDecoder: Identifiable, RowDecoder.ID: DatabaseValueConvertible { @available(*, unavailable, message: "selectID() has been removed. You may use selectPrimaryKey(as:) instead.") public func selectID() -> QueryInterfaceRequest { preconditionFailure() } @@ -144,13 +144,13 @@ extension SelectionRequest { @available(*, unavailable, renamed: "SQLExpression.AssociativeBinaryOperator") public typealias SQLAssociativeBinaryOperator = SQLExpression.AssociativeBinaryOperator -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Table where RowDecoder: Identifiable, RowDecoder.ID: DatabaseValueConvertible { @available(*, unavailable, message: "selectID() has been removed. You may use selectPrimaryKey(as:) instead.") public func selectID() -> QueryInterfaceRequest { preconditionFailure() } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRecord where Self: Identifiable, ID: DatabaseValueConvertible { @available(*, unavailable, message: "selectID() has been removed. You may use selectPrimaryKey(as:) instead.") public static func selectID() -> QueryInterfaceRequest { preconditionFailure() } @@ -167,3 +167,5 @@ extension ValueObservation { where Reducer == ValueReducers.Fetch { preconditionFailure() } } + +// swiftlint:enable all diff --git a/GRDB/JSON/JSONColumn.swift b/GRDB/JSON/JSONColumn.swift new file mode 100644 index 0000000000..8f778d2b51 --- /dev/null +++ b/GRDB/JSON/JSONColumn.swift @@ -0,0 +1,91 @@ +/// A JSON column in a database table. +/// +/// ## Overview +/// +/// `JSONColumn` has benefits over ``Column`` for database columns that +/// contain JSON strings. +/// +/// It behaves like a regular `Column`, with all extra conveniences and +/// behaviors of ``SQLJSONExpressible``. 
+/// +/// For example, the sample code below directly accesses the "countryCode" +/// key of the "address" JSON column: +/// +/// ```swift +/// struct Player: Codable { +/// var id: Int64 +/// var name: String +/// var address: Address +/// } +/// +/// struct Address: Codable { +/// var street: String +/// var city: String +/// var countryCode: String +/// } +/// +/// extension Player: FetchableRecord, PersistableRecord { +/// enum Columns { +/// static let id = Column(CodingKeys.id) +/// static let name = Column(CodingKeys.name) +/// static let address = JSONColumn(CodingKeys.address) // JSONColumn! +/// } +/// } +/// +/// try dbQueue.write { db in +/// // In a real app, table creation should happen in a migration. +/// try db.create(table: "player") { t in +/// t.autoIncrementedPrimaryKey("id") +/// t.column("name", .text).notNull() +/// t.column("address", .jsonText).notNull() +/// } +/// +/// // Fetch all country codes +/// // SELECT DISTINCT address ->> 'countryCode' FROM player +/// let countryCodes: [String] = try Player +/// .select(Player.Columns.address["countryCode"], as: String.self) +/// .distinct() +/// .fetchAll(db) +/// } +/// ``` +/// +/// > Tip: When you can not create a `JSONColumn`, you'll get the same +/// > convenient access to JSON subcomponents +/// > with ``SQLSpecificExpressible/asJSON``. +/// > +/// > For example, the above sample can be adapted as below: +/// > +/// > ```swift +/// > extension Player: FetchableRecord, PersistableRecord { +/// > // That's another valid way to define columns. +/// > // But we don't have any JSONColumn this time. +/// > enum Columns: String, ColumnExpression { +/// > case id, name, address +/// > } +/// > } +/// > +/// > try dbQueue.write { db in +/// > // Fetch all country codes +/// > // SELECT DISTINCT address ->> 'countryCode' FROM player +/// > let countryCodes: [String] = try Player +/// > .select(Player.Columns.address.asJSON["countryCode"], as: String.self) +/// > .distinct() +/// > .fetchAll(db) +/// > } +/// > ``` +public struct JSONColumn: ColumnExpression, SQLJSONExpressible, Sendable { + public var name: String + + /// Creates a `JSONColumn` given its name. + /// + /// The name should be unqualified, such as `"score"`. Qualified name such + /// as `"player.score"` are unsupported. + public init(_ name: String) { + self.name = name + } + + /// Creates a `JSONColumn` given a `CodingKey`. + public init(_ codingKey: some CodingKey) { + self.name = codingKey.stringValue + } +} diff --git a/GRDB/JSON/SQLJSONExpressible.swift b/GRDB/JSON/SQLJSONExpressible.swift new file mode 100644 index 0000000000..991a5dd7d5 --- /dev/null +++ b/GRDB/JSON/SQLJSONExpressible.swift @@ -0,0 +1,440 @@ +/// A type of SQL expression that is interpreted as a JSON value. +/// +/// ## Overview +/// +/// JSON values that conform to `SQLJSONExpressible` have two purposes: +/// +/// - They provide Swift APIs for accessing their JSON subcomponents at +/// the SQL level. +/// +/// - When used in a JSON-building function such as +/// ``Database/jsonArray(_:)-8xxe3`` or ``Database/jsonObject(_:)``, +/// they are parsed and interpreted as JSON, not as plain strings. +/// +/// To build a JSON value, create a ``JSONColumn``, or call the +/// ``SQLSpecificExpressible/asJSON`` property of any +/// other expression. 
+/// +/// For example, here are some JSON values: +/// +/// ```swift +/// // JSON columns: +/// JSONColumn("info") +/// Column("info").asJSON +/// +/// // The JSON array [1, 2, 3]: +/// "[1, 2, 3]".databaseValue.asJSON +/// +/// // A JSON value that will trigger a +/// // "malformed JSON" SQLite error when +/// // parsed by SQLite: +/// "{foo".databaseValue.asJSON +/// ``` +/// +/// The expressions below are not JSON values: +/// +/// ```swift +/// // A plain column: +/// Column("info") +/// +/// // Plain strings: +/// "[1, 2, 3]" +/// "{foo" +/// ``` +/// +/// ## Access JSON subcomponents +/// +/// JSON values provide access to the [`->` and `->>` SQL operators](https://www.sqlite.org/json1.html) +/// and other SQLite JSON functions: +/// +/// ```swift +/// let info = JSONColumn("info") +/// +/// // SELECT info ->> 'firstName' FROM player +/// // → 'Arthur' +/// let firstName = try Player +/// .select(info["firstName"], as: String.self) +/// .fetchOne(db) +/// +/// // SELECT info ->> 'address' FROM player +/// // → '{"street":"Rue de Belleville","city":"Paris"}' +/// let address = try Player +/// .select(info["address"], as: String.self) +/// .fetchOne(db) +/// ``` +/// +/// ## Build JSON objects and arrays from JSON values +/// +/// When used in a JSON-building function such as +/// ``Database/jsonArray(_:)-8xxe3`` or ``Database/jsonObject(_:)-5iswr``, +/// JSON values are parsed and interpreted as JSON, not as plain strings. +/// +/// In the example below, we can see how the `JSONColumn` is interpreted as +/// JSON, while the `Column` with the same name is interpreted as a +/// plain string: +/// +/// ```swift +/// let elements: [any SQLExpressible] = [ +/// JSONColumn("address"), +/// Column("address"), +/// ] +/// +/// let array = Database.jsonArray(elements) +/// +/// // SELECT JSON_ARRAY(JSON(address), address) FROM player +/// // → '[{"country":"FR"},"{\"country\":\"FR\"}"]' +/// // <--- object ---> <------ string ------> +/// let json = try Player +/// .select(array, as: String.self) +/// .fetchOne(db) +/// ``` +/// +/// ## Topics +/// +/// ### Accessing JSON subcomponents +/// +/// - ``subscript(_:)`` +/// - ``jsonExtract(atPath:)`` +/// - ``jsonExtract(atPaths:)`` +/// - ``jsonRepresentation(atPath:)`` +/// +/// ### Supporting Types +/// +/// - ``AnySQLJSONExpressible`` +public protocol SQLJSONExpressible: SQLSpecificExpressible { } + +extension ColumnExpression where Self: SQLJSONExpressible { + /// Returns an SQL column that is interpreted as a JSON value. + public var sqlExpression: SQLExpression { + .column(name).withPreferredJSONInterpretation(.jsonValue) + } +} + +// This type only grants access to `SQLJSONExpressible` apis. The fact that +// it is a JSON value is embedded in its +// `sqlExpression.preferredJSONInterpretation`. +/// A type-erased ``SQLJSONExpressible``. +public struct AnySQLJSONExpressible: SQLJSONExpressible { + /// An SQL expression that is interpreted as a JSON value. + public let sqlExpression: SQLExpression + + public init(_ base: some SQLJSONExpressible) { + self.init(sqlExpression: base.sqlExpression) + } + + /// - Precondition: `sqlExpression` is a JSON value + init(sqlExpression: SQLExpression) { + assert(sqlExpression.preferredJSONInterpretation == .jsonValue) + self.sqlExpression = sqlExpression + } +} + +extension SQLSpecificExpressible { + /// Returns an expression that is interpreted as a JSON value. 
+ /// + /// For example: + /// + /// ```swift + /// let info = Column("info").asJSON + /// + /// // SELECT info ->> 'firstName' FROM player + /// // → 'Arthur' + /// let firstName = try Player + /// .select(info["firstName"], as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// For more information, see ``SQLJSONExpressible``. + public var asJSON: AnySQLJSONExpressible { + AnySQLJSONExpressible(sqlExpression: sqlExpression.withPreferredJSONInterpretation(.jsonValue)) + } +} + +#if GRDBCUSTOMSQLITE || GRDBCIPHER +extension SQLJSONExpressible { + /// The `->>` SQL operator. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT info ->> 'firstName' FROM player + /// // → 'Arthur' + /// let firstName = try Player + /// .select(info["firstName"], as: String.self) + /// .fetchOne(db) + /// + /// // SELECT info ->> 'address' FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let address = try Player + /// .select(info["address"], as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments), + /// or an JSON object field label, or an array index. + public subscript(_ path: some SQLExpressible) -> SQLExpression { + .binary(.jsonExtractSQL, sqlExpression, path.sqlExpression) + } + + /// The `JSON_EXTRACT` SQL function. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT JSON_EXTRACT(info, '$.firstName') FROM player + /// // → 'Arthur' + /// let firstName = try Player + /// .select(info.jsonExtract(atPath: "$.firstName"), as: String.self) + /// .fetchOne(db) + /// + /// // SELECT JSON_EXTRACT(info, '$.address') FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let address = try Player + /// .select(info.jsonExtract(atPath: "$.address"), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments). + public func jsonExtract(atPath path: some SQLExpressible) -> SQLExpression { + Database.jsonExtract(self, atPath: path) + } + + /// The `JSON_EXTRACT` SQL function. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT JSON_EXTRACT(info, '$.firstName', '$.lastName') FROM player + /// // → '["Arthur","Miller"]' + /// let nameComponents = try Player + /// .select(info.jsonExtract(atPaths: ["$.firstName", "$.lastName"]), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments). + public func jsonExtract(atPaths paths: C) -> SQLExpression + where C: Collection, C.Element: SQLExpressible + { + Database.jsonExtract(self, atPaths: paths) + } + + /// Returns a valid JSON string with the `->` SQL operator. 
+ /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT info -> 'firstName' FROM player + /// // → '"Arthur"' + /// let name = try Player + /// .select(info.jsonRepresentation(atPath: "firstName"), as: String.self) + /// .fetchOne(db) + /// + /// // SELECT info -> 'address' FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let name = try Player + /// .select(info.jsonRepresentation(atPath: "address"), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments), + /// or an JSON object field label, or an array index. + public func jsonRepresentation(atPath path: some SQLExpressible) -> SQLExpression { + .binary(.jsonExtractJSON, sqlExpression, path.sqlExpression) + } +} +#else +extension SQLJSONExpressible { + /// The `->>` SQL operator. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT info ->> 'firstName' FROM player + /// // → 'Arthur' + /// let firstName = try Player + /// .select(info["firstName"], as: String.self) + /// .fetchOne(db) + /// + /// // SELECT info ->> 'address' FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let address = try Player + /// .select(info["address"], as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments), + /// or an JSON object field label, or an array index. + @available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) // SQLite 3.38+ + public subscript(_ path: some SQLExpressible) -> SQLExpression { + .binary(.jsonExtractSQL, sqlExpression, path.sqlExpression) + } + + /// The `JSON_EXTRACT` SQL function. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT JSON_EXTRACT(info, '$.firstName') FROM player + /// // → 'Arthur' + /// let firstName = try Player + /// .select(info.jsonExtract(atPath: "$.firstName"), as: String.self) + /// .fetchOne(db) + /// + /// // SELECT JSON_EXTRACT(info, '$.address') FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let address = try Player + /// .select(info.jsonExtract(atPath: "$.address"), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments). + @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS + public func jsonExtract(atPath path: some SQLExpressible) -> SQLExpression { + Database.jsonExtract(self, atPath: path) + } + + /// The `JSON_EXTRACT` SQL function. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT JSON_EXTRACT(info, '$.firstName', '$.lastName') FROM player + /// // → '["Arthur","Miller"]' + /// let nameComponents = try Player + /// .select(info.jsonExtract(atPaths: ["$.firstName", "$.lastName"]), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments). 
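+    ///
+    /// Note that with several paths, `JSON_EXTRACT` wraps the extracted
+    /// values in a JSON array, as in the example above, while with a
+    /// single path it returns the extracted value itself.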
+ @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS + public func jsonExtract(atPaths paths: C) -> SQLExpression + where C: Collection, C.Element: SQLExpressible + { + Database.jsonExtract(self, atPaths: paths) + } + + /// Returns a valid JSON string with the `->` SQL operator. + /// + /// For example: + /// + /// ```swift + /// let info = JSONColumn("info") + /// + /// // SELECT info -> 'firstName' FROM player + /// // → '"Arthur"' + /// let name = try Player + /// .select(info.jsonRepresentation(atPath: "firstName"), as: String.self) + /// .fetchOne(db) + /// + /// // SELECT info -> 'address' FROM player + /// // → '{"street":"Rue de Belleville","city":"Paris"}' + /// let name = try Player + /// .select(info.jsonRepresentation(atPath: "address"), as: String.self) + /// .fetchOne(db) + /// ``` + /// + /// Related SQL documentation: + /// + /// - parameter path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments), + /// or an JSON object field label, or an array index. + @available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) // SQLite 3.38+ + public func jsonRepresentation(atPath path: some SQLExpressible) -> SQLExpression { + .binary(.jsonExtractJSON, sqlExpression, path.sqlExpression) + } +} + +// TODO: Enable when those apis are ready. +// extension ColumnExpression where Self: SQLJSONExpressible { +// /// Updates a columns with the `JSON_PATCH` SQL function. +// /// +// /// For example: +// /// +// /// ```swift +// /// // UPDATE player SET address = JSON_PATCH(address, '{"country": "FR"}') +// /// try Player.updateAll(db, [ +// /// JSONColumn("address").jsonPatch(#"{"country": "FR"}"#) +// /// ]) +// /// ``` +// /// +// /// Related SQLite documentation: +// @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS +// public func jsonPatch( +// with patch: some SQLExpressible) +// -> ColumnAssignment +// { +// .init(columnName: name, value: Database.jsonPatch(self, with: patch)) +// } +// +// /// Updates a columns with the `JSON_REMOVE` SQL function. +// /// +// /// For example: +// /// +// /// ```swift +// /// // UPDATE player SET address = JSON_REMOVE(address, '$.country') +// /// try Player.updateAll(db, [ +// /// JSONColumn("address").jsonRemove(atPath: "$.country") +// /// ]) +// /// ``` +// /// +// /// Related SQLite documentation: +// /// +// /// - Parameters: +// /// - paths: A [JSON path](https://www.sqlite.org/json1.html#path_arguments). +// @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS +// public func jsonRemove(atPath path: some SQLExpressible) -> ColumnAssignment { +// .init(columnName: name, value: Database.jsonRemove(self, atPath: path)) +// } +// +// /// Updates a columns with the `JSON_REMOVE` SQL function. +// /// +// /// For example: +// /// +// /// ```swift +// /// // UPDATE player SET address = JSON_REMOVE(address, '$.country', '$.city') +// /// try Player.updateAll(db, [ +// /// JSONColumn("address").jsonRemove(atPatsh: ["$.country", "$.city"]) +// /// ]) +// /// ``` +// /// +// /// Related SQLite documentation: +// /// +// /// - Parameters: +// /// - paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments). 
+// @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS +// public func jsonRemove(atPaths paths: C) +// -> ColumnAssignment +// where C: Collection, C.Element: SQLExpressible +// { +// .init(columnName: name, value: Database.jsonRemove(self, atPaths: paths)) +// } +// +// } +#endif diff --git a/GRDB/JSON/SQLJSONFunctions.swift b/GRDB/JSON/SQLJSONFunctions.swift new file mode 100644 index 0000000000..85122f2895 --- /dev/null +++ b/GRDB/JSON/SQLJSONFunctions.swift @@ -0,0 +1,866 @@ +#if GRDBCUSTOMSQLITE || GRDBCIPHER +extension Database { + /// Validates and minifies a JSON string, with the `JSON` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON(' { "a": [ "test" ] } ') → '{"a":["test"]}' + /// Database.json(#" { "a": [ "test" ] } "#) + /// ``` + /// + /// Related SQLite documentation: + public static func json(_ value: some SQLExpressible) -> SQLExpression { + .function("JSON", [value.sqlExpression]) + } + + /// Creates a JSON array with the `JSON_ARRAY` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON_ARRAY(1, 2, 3, 4) → '[1,2,3,4]' + /// Database.jsonArray(1...4) + /// ``` + /// + /// Related SQLite documentation: + public static func jsonArray(_ values: C) -> SQLExpression + where C: Collection, C.Element: SQLExpressible + { + .function("JSON_ARRAY", values.map(\.sqlExpression.jsonBuilderExpression)) + } + + /// Creates a JSON array with the `JSON_ARRAY` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON_ARRAY(1, 2, '3', 4) → '[1,2,"3",4]' + /// Database.jsonArray([1, 2, "3", 4]) + /// ``` + /// + /// Related SQLite documentation: + public static func jsonArray(_ values: C) -> SQLExpression + where C: Collection, C.Element == any SQLExpressible + { + .function("JSON_ARRAY", values.map(\.sqlExpression.jsonBuilderExpression)) + } + + /// The number of elements in a JSON array, as returned by the + /// `JSON_ARRAY_LENGTH` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON_ARRAY_LENGTH('[1,2,3,4]') → 4 + /// Database.jsonArrayLength("[1,2,3,4]") + /// ``` + /// + /// Related SQLite documentation: + public static func jsonArrayLength(_ value: some SQLExpressible) -> SQLExpression { + .function("JSON_ARRAY_LENGTH", [value.sqlExpression]) + } + + /// The number of elements in a JSON array, as returned by the + /// `JSON_ARRAY_LENGTH` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON_ARRAY_LENGTH('{"one":[1,2,3]}', '$.one') → 3 + /// Database.jsonArrayLength(#"{"one":[1,2,3]}"#, atPath: "$.one") + /// ``` + /// + /// Related SQLite documentation: + /// + /// - Parameters: + /// - value: A JSON array. + /// - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments). + public static func jsonArrayLength( + _ value: some SQLExpressible, + atPath path: some SQLExpressible) + -> SQLExpression + { + .function("JSON_ARRAY_LENGTH", [value.sqlExpression, path.sqlExpression]) + } + + /// The `JSON_ERROR_POSITION` SQL function. + /// + /// For example: + /// + /// ```swift + /// // JSON_ERROR_POSITION(info) + /// Database.jsonErrorPosition(Column("info")) + /// ``` + /// + /// Related SQLite documentation: + public static func jsonErrorPosition(_ value: some SQLExpressible) -> SQLExpression { + .function("JSON_ERROR_POSITION", [value.sqlExpression]) + } + + /// The `JSON_EXTRACT` SQL function. 
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_EXTRACT('{"a":123}', '$.a') → 123
+    /// Database.jsonExtract(#"{"a":123}"#, atPath: "$.a")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonExtract(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_EXTRACT", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_EXTRACT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_EXTRACT('{"a":2,"c":[4,5]}','$.c','$.a') → '[[4,5],2]'
+    /// Database.jsonExtract(#"{"a":2,"c":[4,5]}"#, atPaths: ["$.c", "$.a"])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonExtract<C>(_ value: some SQLExpressible, atPaths paths: C)
+    -> SQLExpression
+    where C: Collection, C.Element: SQLExpressible
+    {
+        .function("JSON_EXTRACT", [value.sqlExpression] + paths.map(\.sqlExpression))
+    }
+
+    /// The `JSON_INSERT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_INSERT('[1,2,3,4]','$[#]',99) → '[1,2,3,4,99]'
+    /// Database.jsonInsert("[1,2,3,4]", ["$[#]": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonInsert<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_INSERT", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_REPLACE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REPLACE('{"a":2,"c":4}', '$.a', 99) → '{"a":99,"c":4}'
+    /// Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonReplace<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_REPLACE", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_SET` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_SET('{"a":2,"c":4}', '$.a', 99) → '{"a":99,"c":4}'
+    /// Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
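+    ///
+    /// Note (per the SQLite documentation): `JSON_INSERT` only creates
+    /// values that do not already exist, `JSON_REPLACE` only overwrites
+    /// values that do exist, and `JSON_SET` unconditionally does both.
+    /// A hedged sketch:
+    ///
+    /// ```swift
+    /// // JSON_SET('{"a":2,"c":4}', '$.e', 99) → '{"a":2,"c":4,"e":99}'
+    /// Database.jsonSet(#"{"a":2,"c":4}"#, ["$.e": 99])
+    /// ```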
+    /// The `JSON_SET` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_SET('{"a":2,"c":4}', '$.a', 99) → '{"a":99,"c":4}'
+    /// Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonSet<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_SET", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// Creates a JSON object with the `JSON_OBJECT` SQL function. Pass
+    /// key/value pairs with a Swift collection such as a `Dictionary`.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_OBJECT('c', '{"e":5}') → '{"c":"{\"e\":5}"}'
+    /// Database.jsonObject([
+    ///     "c": #"{"e":5}"#,
+    /// ])
+    ///
+    /// // JSON_OBJECT('c', JSON_OBJECT('e', 5)) → '{"c":{"e":5}}'
+    /// Database.jsonObject([
+    ///     "c": Database.jsonObject(["e": 5]),
+    /// ])
+    ///
+    /// // JSON_OBJECT('c', JSON('{"e":5}')) → '{"c":{"e":5}}'
+    /// Database.jsonObject([
+    ///     "c": Database.json(#"{"e":5}"#),
+    /// ])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonObject<C>(_ elements: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_OBJECT", elements.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_PATCH` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_PATCH('{"a":1,"b":2}','{"c":3,"d":4}') → '{"a":1,"b":2,"c":3,"d":4}'
+    /// Database.jsonPatch(#"{"a":1,"b":2}"#, #"{"c":3,"d":4}"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonPatch(
+        _ value: some SQLExpressible,
+        with patch: some SQLExpressible)
+    -> SQLExpression
+    {
+        .function("JSON_PATCH", [value.sqlExpression, patch.sqlExpression])
+    }
+
+    /// The `JSON_REMOVE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REMOVE('[0,1,2,3,4]', '$[2]') → '[0,1,3,4]'
+    /// Database.jsonRemove("[0,1,2,3,4]", atPath: "$[2]")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonRemove(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_REMOVE", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_REMOVE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REMOVE('[0,1,2,3,4]', '$[2]','$[0]') → '[1,3,4]'
+    /// Database.jsonRemove("[0,1,2,3,4]", atPaths: ["$[2]", "$[0]"])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonRemove<C>(_ value: some SQLExpressible, atPaths paths: C)
+    -> SQLExpression
+    where C: Collection, C.Element: SQLExpressible
+    {
+        .function("JSON_REMOVE", [value.sqlExpression] + paths.map(\.sqlExpression))
+    }
+
+    /// The `JSON_TYPE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}') → 'object'
+    /// Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonType(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_TYPE", [value.sqlExpression])
+    }
+
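An editorial sketch of these builders inside an UPDATE, assuming a `player` table with a TEXT `info` column that stores a JSON object:

```swift
try dbQueue.write { db in
    // UPDATE player SET info = JSON_SET(info, '$.score', 100)
    try Player.updateAll(
        db,
        Column("info").set(to: Database.jsonSet(Column("info"), ["$.score": 100])))
}
```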
+    /// The `JSON_TYPE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}', '$.a') → 'array'
+    /// Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#, atPath: "$.a")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    public static func jsonType(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_TYPE", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_VALID` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_VALID('{"x":35') → 0
+    /// Database.jsonIsValid(#"{"x":35"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonIsValid(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_VALID", [value.sqlExpression])
+    }
+
+    /// Returns a valid JSON string with the `JSON_QUOTE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_QUOTE('[1]') → '"[1]"'
+    /// Database.jsonQuote("[1]")
+    ///
+    /// // JSON_QUOTE(JSON('[1]')) → '[1]'
+    /// Database.jsonQuote(Database.json("[1]"))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonQuote(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_QUOTE", [value.sqlExpression.jsonBuilderExpression])
+    }
+
+    /// The `JSON_GROUP_ARRAY` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // SELECT JSON_GROUP_ARRAY(name) FROM player
+    /// Player.select(Database.jsonGroupArray(Column("name")))
+    ///
+    /// // SELECT JSON_GROUP_ARRAY(name) FILTER (WHERE score > 0) FROM player
+    /// Player.select(Database.jsonGroupArray(Column("name"), filter: Column("score") > 0))
+    ///
+    /// // SELECT JSON_GROUP_ARRAY(name ORDER BY name) FROM player
+    /// Player.select(Database.jsonGroupArray(Column("name"), orderBy: Column("name")))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonGroupArray(
+        _ value: some SQLExpressible,
+        orderBy ordering: (any SQLOrderingTerm)? = nil,
+        filter: (any SQLSpecificExpressible)? = nil)
+    -> SQLExpression {
+        .aggregateFunction(
+            "JSON_GROUP_ARRAY",
+            [value.sqlExpression.jsonBuilderExpression],
+            ordering: ordering?.sqlOrdering,
+            filter: filter?.sqlExpression,
+            isJSONValue: true)
+    }
+
+    /// The `JSON_GROUP_OBJECT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // SELECT JSON_GROUP_OBJECT(name, score) FROM player
+    /// Player.select(Database.jsonGroupObject(
+    ///     key: Column("name"),
+    ///     value: Column("score")))
+    ///
+    /// // SELECT JSON_GROUP_OBJECT(name, score) FILTER (WHERE score > 0) FROM player
+    /// Player.select(Database.jsonGroupObject(
+    ///     key: Column("name"),
+    ///     value: Column("score"),
+    ///     filter: Column("score") > 0))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    public static func jsonGroupObject(
+        key: some SQLExpressible,
+        value: some SQLExpressible,
+        filter: (any SQLSpecificExpressible)? = nil
+    ) -> SQLExpression {
+        .aggregateFunction(
+            "JSON_GROUP_OBJECT",
+            [key.sqlExpression, value.sqlExpression.jsonBuilderExpression],
+            filter: filter?.sqlExpression,
+            isJSONValue: true)
+    }
+}
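A sketch of the custom-build-only `orderBy` parameter, which this SQLCipher fork gets because it compiles against the bundled library; note that ordered aggregates require a recent SQLite (3.44+), so this is conditional on the embedded SQLCipher version, and the `player` table is assumed:

```swift
// SELECT JSON_GROUP_ARRAY(name ORDER BY name) FILTER (WHERE score > 0)
// FROM player
let request = Player.select(
    Database.jsonGroupArray(
        Column("name"),
        orderBy: Column("name"),
        filter: Column("score") > 0),
    as: String.self)
```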
+#else
+extension Database {
+    /// Validates and minifies a JSON string, with the `JSON` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON(' { "a": [ "test" ] } ') → '{"a":["test"]}'
+    /// Database.json(#" { "a": [ "test" ] } "#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func json(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON", [value.sqlExpression])
+    }
+
+    /// Creates a JSON array with the `JSON_ARRAY` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_ARRAY(1, 2, 3, 4) → '[1,2,3,4]'
+    /// Database.jsonArray(1...4)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonArray<C>(_ values: C) -> SQLExpression
+    where C: Collection, C.Element: SQLExpressible
+    {
+        .function("JSON_ARRAY", values.map(\.sqlExpression.jsonBuilderExpression))
+    }
+
+    /// Creates a JSON array with the `JSON_ARRAY` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_ARRAY(1, 2, '3', 4) → '[1,2,"3",4]'
+    /// Database.jsonArray([1, 2, "3", 4])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonArray<C>(_ values: C) -> SQLExpression
+    where C: Collection, C.Element == any SQLExpressible
+    {
+        .function("JSON_ARRAY", values.map(\.sqlExpression.jsonBuilderExpression))
+    }
+
+    /// The number of elements in a JSON array, as returned by the
+    /// `JSON_ARRAY_LENGTH` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_ARRAY_LENGTH('[1,2,3,4]') → 4
+    /// Database.jsonArrayLength("[1,2,3,4]")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonArrayLength(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_ARRAY_LENGTH", [value.sqlExpression])
+    }
+
+    /// The number of elements in a JSON array, as returned by the
+    /// `JSON_ARRAY_LENGTH` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_ARRAY_LENGTH('{"one":[1,2,3]}', '$.one') → 3
+    /// Database.jsonArrayLength(#"{"one":[1,2,3]}"#, atPath: "$.one")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON array.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonArrayLength(
+        _ value: some SQLExpressible,
+        atPath path: some SQLExpressible)
+    -> SQLExpression
+    {
+        .function("JSON_ARRAY_LENGTH", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_EXTRACT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_EXTRACT('{"a":123}', '$.a') → 123
+    /// Database.jsonExtract(#"{"a":123}"#, atPath: "$.a")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonExtract(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_EXTRACT", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_EXTRACT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_EXTRACT('{"a":2,"c":[4,5]}','$.c','$.a') → '[[4,5],2]'
+    /// Database.jsonExtract(#"{"a":2,"c":[4,5]}"#, atPaths: ["$.c", "$.a"])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonExtract<C>(_ value: some SQLExpressible, atPaths paths: C)
+    -> SQLExpression
+    where C: Collection, C.Element: SQLExpressible
+    {
+        .function("JSON_EXTRACT", [value.sqlExpression] + paths.map(\.sqlExpression))
+    }
+
+    /// The `JSON_INSERT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_INSERT('[1,2,3,4]','$[#]',99) → '[1,2,3,4,99]'
+    /// Database.jsonInsert("[1,2,3,4]", ["$[#]": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonInsert<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_INSERT", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_REPLACE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REPLACE('{"a":2,"c":4}', '$.a', 99) → '{"a":99,"c":4}'
+    /// Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonReplace<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_REPLACE", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_SET` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_SET('{"a":2,"c":4}', '$.a', 99) → '{"a":99,"c":4}'
+    /// Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": 99])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - assignments: A collection of key/value pairs, where keys are
+    ///       [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonSet<C>(
+        _ value: some SQLExpressible,
+        _ assignments: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_SET", [value.sqlExpression] + assignments.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// Creates a JSON object with the `JSON_OBJECT` SQL function. Pass
+    /// key/value pairs with a Swift collection such as a `Dictionary`.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_OBJECT('c', '{"e":5}') → '{"c":"{\"e\":5}"}'
+    /// Database.jsonObject([
+    ///     "c": #"{"e":5}"#,
+    /// ])
+    ///
+    /// // JSON_OBJECT('c', JSON_OBJECT('e', 5)) → '{"c":{"e":5}}'
+    /// Database.jsonObject([
+    ///     "c": Database.jsonObject(["e": 5]),
+    /// ])
+    ///
+    /// // JSON_OBJECT('c', JSON('{"e":5}')) → '{"c":{"e":5}}'
+    /// Database.jsonObject([
+    ///     "c": Database.json(#"{"e":5}"#),
+    /// ])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonObject<C>(_ elements: C)
+    -> SQLExpression
+    where C: Collection,
+          C.Element == (key: String, value: any SQLExpressible)
+    {
+        .function("JSON_OBJECT", elements.flatMap {
+            [$0.key.sqlExpression, $0.value.sqlExpression.jsonBuilderExpression]
+        })
+    }
+
+    /// The `JSON_PATCH` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_PATCH('{"a":1,"b":2}','{"c":3,"d":4}') → '{"a":1,"b":2,"c":3,"d":4}'
+    /// Database.jsonPatch(#"{"a":1,"b":2}"#, #"{"c":3,"d":4}"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonPatch(
+        _ value: some SQLExpressible,
+        with patch: some SQLExpressible)
+    -> SQLExpression
+    {
+        .function("JSON_PATCH", [value.sqlExpression, patch.sqlExpression])
+    }
+
+    /// The `JSON_REMOVE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REMOVE('[0,1,2,3,4]', '$[2]') → '[0,1,3,4]'
+    /// Database.jsonRemove("[0,1,2,3,4]", atPath: "$[2]")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonRemove(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_REMOVE", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_REMOVE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_REMOVE('[0,1,2,3,4]', '$[2]','$[0]') → '[1,3,4]'
+    /// Database.jsonRemove("[0,1,2,3,4]", atPaths: ["$[2]", "$[0]"])
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - paths: A collection of [JSON paths](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonRemove<C>(_ value: some SQLExpressible, atPaths paths: C)
+    -> SQLExpression
+    where C: Collection, C.Element: SQLExpressible
+    {
+        .function("JSON_REMOVE", [value.sqlExpression] + paths.map(\.sqlExpression))
+    }
+
+    /// The `JSON_TYPE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}') → 'object'
+    /// Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonType(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_TYPE", [value.sqlExpression])
+    }
+
+    /// The `JSON_TYPE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}', '$.a') → 'array'
+    /// Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#, atPath: "$.a")
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - Parameters:
+    ///     - value: A JSON value.
+    ///     - path: A [JSON path](https://www.sqlite.org/json1.html#path_arguments).
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonType(_ value: some SQLExpressible, atPath path: some SQLExpressible) -> SQLExpression {
+        .function("JSON_TYPE", [value.sqlExpression, path.sqlExpression])
+    }
+
+    /// The `JSON_VALID` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_VALID('{"x":35') → 0
+    /// Database.jsonIsValid(#"{"x":35"#)
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonIsValid(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_VALID", [value.sqlExpression])
+    }
+
+    /// Returns a valid JSON string with the `JSON_QUOTE` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // JSON_QUOTE('[1]') → '"[1]"'
+    /// Database.jsonQuote("[1]")
+    ///
+    /// // JSON_QUOTE(JSON('[1]')) → '[1]'
+    /// Database.jsonQuote(Database.json("[1]"))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonQuote(_ value: some SQLExpressible) -> SQLExpression {
+        .function("JSON_QUOTE", [value.sqlExpression.jsonBuilderExpression])
+    }
+
+    /// The `JSON_GROUP_ARRAY` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // SELECT JSON_GROUP_ARRAY(name) FROM player
+    /// Player.select(Database.jsonGroupArray(Column("name")))
+    ///
+    /// // SELECT JSON_GROUP_ARRAY(name) FILTER (WHERE score > 0) FROM player
+    /// Player.select(Database.jsonGroupArray(Column("name"), filter: Column("score") > 0))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonGroupArray(
+        _ value: some SQLExpressible,
+        filter: (any SQLSpecificExpressible)? = nil)
+    -> SQLExpression {
+        .aggregateFunction(
+            "JSON_GROUP_ARRAY",
+            [value.sqlExpression.jsonBuilderExpression],
+            filter: filter?.sqlExpression,
+            isJSONValue: true)
+    }
+
+    /// The `JSON_GROUP_OBJECT` SQL function.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // SELECT JSON_GROUP_OBJECT(name, score) FROM player
+    /// Player.select(Database.jsonGroupObject(
+    ///     key: Column("name"),
+    ///     value: Column("score")))
+    ///
+    /// // SELECT JSON_GROUP_OBJECT(name, score) FILTER (WHERE score > 0) FROM player
+    /// Player.select(Database.jsonGroupObject(
+    ///     key: Column("name"),
+    ///     value: Column("score"),
+    ///     filter: Column("score") > 0))
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS
+    public static func jsonGroupObject(
+        key: some SQLExpressible,
+        value: some SQLExpressible,
+        filter: (any SQLSpecificExpressible)? = nil
+    ) -> SQLExpression {
+        .aggregateFunction(
+            "JSON_GROUP_OBJECT",
+            [key.sqlExpression, value.sqlExpression.jsonBuilderExpression],
+            filter: filter?.sqlExpression,
+            isJSONValue: true)
+    }
+}
+#endif
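The aggregate builders return a JSON string, which can be decoded back on the Swift side. An editorial sketch, assuming the same hypothetical `player` table:

```swift
import Foundation

let namesJSON: String? = try dbQueue.read { db in
    // SELECT JSON_GROUP_ARRAY(name) FROM player
    try Player
        .select(Database.jsonGroupArray(Column("name")), as: String.self)
        .fetchOne(db)
}
// namesJSON is e.g. #"["Arthur","Barbara"]"#
let names: [String] = try namesJSON.map {
    try JSONDecoder().decode([String].self, from: Data($0.utf8))
} ?? []
```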
diff --git a/GRDB/Migration/DatabaseMigrator.swift b/GRDB/Migration/DatabaseMigrator.swift
index 974fa753b4..f4de96c69e 100644
--- a/GRDB/Migration/DatabaseMigrator.swift
+++ b/GRDB/Migration/DatabaseMigrator.swift
@@ -42,7 +42,7 @@ import Foundation
 /// - ``hasCompletedMigrations(_:)``
 public struct DatabaseMigrator {
     /// Controls how a migration handles foreign key constraints.
-    public enum ForeignKeyChecks {
+    public enum ForeignKeyChecks: Sendable {
         /// The migration runs with disabled foreign keys.
         ///
         /// Foreign keys are checked right before changes are committed on disk,
@@ -50,7 +50,7 @@
         /// ``DatabaseMigrator/disablingDeferredForeignKeyChecks()``.
         ///
         /// In this case, you can perform your own deferred foreign key checks
-        /// with ``Database/checkForeignKeys(in:)`` or
+        /// with ``Database/checkForeignKeys(in:in:)`` or
         /// ``Database/checkForeignKeys()``:
         ///
         /// ```swift
@@ -118,7 +118,7 @@
     /// The returned migrator is _unsafe_, because it no longer guarantees the
     /// integrity of the database. It is now _your_ responsibility to register
     /// migrations that do not break foreign key constraints. See
-    /// ``Database/checkForeignKeys()`` and ``Database/checkForeignKeys(in:)``.
+    /// ``Database/checkForeignKeys()`` and ``Database/checkForeignKeys(in:in:)``.
     ///
     /// Running migrations without foreign key checks can improve migration
     /// performance on huge databases.
@@ -269,8 +269,8 @@
         writer.asyncBarrierWriteWithoutTransaction { dbResult in
             do {
                 let db = try dbResult.get()
-                if let lastMigration = self._migrations.last {
-                    try self.migrate(db, upTo: lastMigration.identifier)
+                if let lastMigration = _migrations.last {
+                    try migrate(db, upTo: lastMigration.identifier)
                 }
                 completion(.success(db))
             } catch {
@@ -409,7 +409,7 @@
         if eraseDatabaseOnSchemaChange {
             var needsErase = false
             try db.inTransaction(.deferred) {
-                let appliedIdentifiers = try self.appliedIdentifiers(db)
+                let appliedIdentifiers = try appliedIdentifiers(db)
                 let knownIdentifiers = Set(_migrations.map { $0.identifier })
                 if !appliedIdentifiers.isSubset(of: knownIdentifiers) {
                     // Database contains an unknown migration
@@ -446,7 +446,7 @@
                     //
                     // So let's create a "regular" temporary database:
                     let tmpURL = URL(fileURLWithPath: NSTemporaryDirectory())
-                        .appendingPathComponent(ProcessInfo().globallyUniqueString)
+                        .appendingPathComponent(ProcessInfo.processInfo.globallyUniqueString)
                     defer {
                         try? FileManager().removeItem(at: tmpURL)
                     }
@@ -457,7 +463,13 @@
                     }
                 }()
 
-                if try db.schema(.main) != tmpSchema {
+                // Only compare user objects
+                func isUserObject(_ object: SchemaObject) -> Bool {
+                    !Database.isSQLiteInternalTable(object.name) && !Database.isGRDBInternalTable(object.name)
+                }
+                let tmpUserSchema = tmpSchema.filter(isUserObject)
+                let userSchema = try db.schema(.main).filter(isUserObject)
+                if userSchema != tmpUserSchema {
                     needsErase = true
                     return .commit
                 }
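The user-schema comparison above is what lets `eraseDatabaseOnSchemaChange` ignore internal SQLite and GRDB tables. For context, a typical migrator setup (standard GRDB API, shown here as an editorial sketch):

```swift
var migrator = DatabaseMigrator()

#if DEBUG
// During development, recreate the database from scratch whenever a
// registered migration changes. Only user tables are compared.
migrator.eraseDatabaseOnSchemaChange = true
#endif

migrator.registerMigration("createPlayer") { db in
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("name", .text).notNull()
        t.column("score", .integer).notNull().defaults(to: 0)
    }
}

try migrator.migrate(dbQueue)
```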
@@ -490,7 +496,7 @@ extension DatabaseMigrator {
     /// - parameter writer: A DatabaseWriter.
     ///   where migrations should apply.
     /// - parameter scheduler: A Combine Scheduler.
-    @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *)
+    @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *)
     public func migratePublisher(
         _ writer: some DatabaseWriter,
         receiveOn scheduler: some Scheduler = DispatchQueue.main)
@@ -508,7 +514,7 @@
     }
 }
 
-@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *)
+@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *)
 extension DatabasePublishers {
     /// A publisher that migrates a database.
     ///
diff --git a/GRDB/PrivacyInfo.xcprivacy b/GRDB/PrivacyInfo.xcprivacy
new file mode 100644
index 0000000000..d75908da05
--- /dev/null
+++ b/GRDB/PrivacyInfo.xcprivacy
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>NSPrivacyTracking</key>
+    <false/>
+    <key>NSPrivacyCollectedDataTypes</key>
+    <array/>
+    <key>NSPrivacyTrackingDomains</key>
+    <array/>
+    <key>NSPrivacyAccessedAPITypes</key>
+    <array/>
+</dict>
+</plist>
diff --git a/GRDB/QueryInterface/ForeignKey.swift b/GRDB/QueryInterface/ForeignKey.swift
index f89ee964ce..fbe27461d6 100644
--- a/GRDB/QueryInterface/ForeignKey.swift
+++ b/GRDB/QueryInterface/ForeignKey.swift
@@ -80,7 +80,7 @@
 ///             using: Book.translatorForeignKey)
 ///     }
 /// ```
-public struct ForeignKey: Equatable {
+public struct ForeignKey: Equatable, Sendable {
     var originColumns: [String]
     var destinationColumns: [String]?
 
diff --git a/GRDB/QueryInterface/Request/Association/Association.swift b/GRDB/QueryInterface/Request/Association/Association.swift
index 222ac6dcbd..debe9590a3 100644
--- a/GRDB/QueryInterface/Request/Association/Association.swift
+++ b/GRDB/QueryInterface/Request/Association/Association.swift
@@ -222,6 +222,12 @@ extension Association {
                 relation = relation.unordered()
             }
         }
     }
+    
+    public func withStableOrder() -> Self {
+        withDestinationRelation { relation in
+            relation = relation.withStableOrder()
+        }
+    }
 }
 
 // TableRequest conformance
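`withStableOrder()`, added above for associations and further down for requests, exists to lift ordering ambiguities so that test assertions become deterministic. A sketch, assuming a `Player` record inside an `XCTestCase` subclass:

```swift
func testPlayerNames() throws {
    try dbQueue.read { db in
        // Without an explicit ORDER BY, SQLite may return rows in any
        // order; withStableOrder() makes the assertion reliable.
        let names = try Player
            .select(Column("name"), as: String.self)
            .withStableOrder()
            .fetchAll(db)
        XCTAssertEqual(names, ["Arthur", "Barbara"])
    }
}
```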
diff --git a/GRDB/QueryInterface/Request/Association/AssociationAggregate.swift b/GRDB/QueryInterface/Request/Association/AssociationAggregate.swift
index 08b8af6f85..028cfcc42d 100644
--- a/GRDB/QueryInterface/Request/Association/AssociationAggregate.swift
+++ b/GRDB/QueryInterface/Request/Association/AssociationAggregate.swift
@@ -127,7 +127,7 @@ extension AssociationToMany {
     ///     }
     /// ```
     public func average(_ expression: some SQLSpecificExpressible) -> AssociationAggregate<RowDecoder> {
-        let aggregate = makeAggregate(.aggregate("AVG", [expression.sqlExpression]))
+        let aggregate = makeAggregate(.function("AVG", [expression.sqlExpression]))
         if let column = expression as? any ColumnExpression {
             let name = key.singularizedName
             return aggregate.forKey("average\(name.uppercasingFirstCharacter)\(column.name.uppercasingFirstCharacter)")
@@ -174,7 +174,7 @@
     ///     }
     /// ```
     public func max(_ expression: some SQLSpecificExpressible) -> AssociationAggregate<RowDecoder> {
-        let aggregate = makeAggregate(.aggregate("MAX", [expression.sqlExpression]))
+        let aggregate = makeAggregate(.function("MAX", [expression.sqlExpression]))
         if let column = expression as? any ColumnExpression {
             let name = key.singularizedName
             return aggregate.forKey("max\(name.uppercasingFirstCharacter)\(column.name.uppercasingFirstCharacter)")
@@ -221,7 +221,7 @@
     ///     }
     /// ```
     public func min(_ expression: some SQLSpecificExpressible) -> AssociationAggregate<RowDecoder> {
-        let aggregate = makeAggregate(.aggregate("MIN", [expression.sqlExpression]))
+        let aggregate = makeAggregate(.function("MIN", [expression.sqlExpression]))
         if let column = expression as? any ColumnExpression {
             let name = key.singularizedName
             return aggregate.forKey("min\(name.uppercasingFirstCharacter)\(column.name.uppercasingFirstCharacter)")
@@ -271,7 +271,7 @@
     ///     }
     /// ```
     public func sum(_ expression: some SQLSpecificExpressible) -> AssociationAggregate<RowDecoder> {
-        let aggregate = makeAggregate(.aggregate("SUM", [expression.sqlExpression]))
+        let aggregate = makeAggregate(.function("SUM", [expression.sqlExpression]))
         if let column = expression as? any ColumnExpression {
             let name = key.singularizedName
             return aggregate.forKey("\(name)\(column.name.uppercasingFirstCharacter)Sum")
@@ -321,7 +321,7 @@
     ///     }
     /// ```
     public func total(_ expression: some SQLSpecificExpressible) -> AssociationAggregate<RowDecoder> {
-        let aggregate = makeAggregate(.aggregate("TOTAL", [expression.sqlExpression]))
+        let aggregate = makeAggregate(.function("TOTAL", [expression.sqlExpression]))
         if let column = expression as? any ColumnExpression {
             let name = key.singularizedName
             // Yes we use the `Sum` suffix instead of `Total`. Both `total(_:)`
@@ -835,7 +835,7 @@
     }
 }
 
-// MARK: - IFNULL(...)
+// MARK: - Functions
 
 extension AssociationAggregate {
     /// The `IFNULL` SQL function.
@@ -854,8 +854,6 @@
     }
 }
 
-// MARK: - ABS(...)
-
 /// The `ABS` SQL function.
 public func abs<RowDecoder>(_ aggregate: AssociationAggregate<RowDecoder>)
 -> AssociationAggregate<RowDecoder>
@@ -863,7 +861,18 @@
     aggregate.map(abs)
 }
 
-// MARK: - LENGTH(...)
+/// The `CAST` SQL function.
+///
+/// Related SQLite documentation:
+public func cast<RowDecoder>(
+    _ aggregate: AssociationAggregate<RowDecoder>,
+    as storageClass: Database.StorageClass)
+-> AssociationAggregate<RowDecoder>
+{
+    aggregate
+        .map { cast($0, as: storageClass) }
+        .with { $0.key = aggregate.key } // Preserve key
+}
 
 /// The `LENGTH` SQL function.
 public func length<RowDecoder>(_ aggregate: AssociationAggregate<RowDecoder>)
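A sketch of the new `cast` aggregate wrapper; the `Team.players` association, the column names, and the `.integer` storage-class spelling are illustrative assumptions rather than part of this patch:

```swift
// Roughly:
// SELECT team.*, CAST(AVG(player.score) AS INTEGER) AS averagePlayerScore
// FROM team LEFT JOIN player ON player.teamId = team.id
// GROUP BY team.id
let request = Team.annotated(
    with: cast(Team.players.average(Column("score")), as: .integer))
```

Per the `// Preserve key` comment in the diff, the cast keeps the wrapped aggregate's default key, so the value still loads under `averagePlayerScore`.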
diff --git a/GRDB/QueryInterface/Request/Association/BelongsToAssociation.swift b/GRDB/QueryInterface/Request/Association/BelongsToAssociation.swift
index ea04513d62..0bf5734522 100644
--- a/GRDB/QueryInterface/Request/Association/BelongsToAssociation.swift
+++ b/GRDB/QueryInterface/Request/Association/BelongsToAssociation.swift
@@ -28,26 +28,23 @@
 ///     }
 ///     try db.create(table: "book") { t in
 ///         t.autoIncrementedPrimaryKey("id")
-///         t.column("authorId", .integer)                // (2)
+///         t.belongsTo("author", onDelete: .cascade)     // (2)
 ///             .notNull()                                // (3)
-///             .indexed()                                // (4)
-///             .references("author", onDelete: .cascade) // (5)
 ///         t.column("title", .text)
 ///     }
 /// ```
 ///
 /// 1. The author table has a primary key.
 /// 2. The `book.authorId` column is used to link a book to the author it
-///    belongs to.
+///    belongs to. This column is indexed in order to ease the selection of
+///    an author's books. A foreign key is defined from `book.authorId`
+///    column to `author.id`, so that SQLite guarantees that no book refers
+///    to a missing author. The `onDelete: .cascade` option has SQLite
+///    automatically delete all of an author's books when that author is
+///    deleted. See for
+///    more information.
 /// 3. Make the `book.authorId` column not null if you want SQLite to guarantee
 ///    that all books have an author.
-/// 4. Create an index on the `book.authorId` column in order to ease the
-///    selection of an author's books.
-/// 5. Create a foreign key from `book.authorId` column to `author.id`, so that
-///    SQLite guarantees that no book refers to a missing author. The
-///    `onDelete: .cascade` option has SQLite automatically delete all of an
-///    author's books when that author is deleted.
-///    See for more information.
 ///
 /// The example above uses auto-incremented primary keys. But generally
 /// speaking, all primary keys are supported.
@@ -77,7 +74,7 @@ public struct BelongsToAssociation<Origin, Destination> {
             originIsLeft: true)
         
         let associationKey: SQLAssociationKey
-        if let key = key {
+        if let key {
             associationKey = .fixedSingular(key)
         } else {
             associationKey = .inflected(destinationTable)
diff --git a/GRDB/QueryInterface/Request/Association/HasManyAssociation.swift b/GRDB/QueryInterface/Request/Association/HasManyAssociation.swift
index 2b39730051..dd88860503 100644
--- a/GRDB/QueryInterface/Request/Association/HasManyAssociation.swift
+++ b/GRDB/QueryInterface/Request/Association/HasManyAssociation.swift
@@ -28,26 +28,22 @@
 ///     }
 ///     try db.create(table: "book") { t in
 ///         t.autoIncrementedPrimaryKey("id")
-///         t.column("authorId", .integer)                // (2)
+///         t.belongsTo("author", onDelete: .cascade)     // (2)
 ///             .notNull()                                // (3)
-///             .indexed()                                // (4)
-///             .references("author", onDelete: .cascade) // (5)
 ///         t.column("title", .text)
 ///     }
 /// ```
 ///
 /// 1. The author table has a primary key.
 /// 2. The `book.authorId` column is used to link a book to the author it
-///    belongs to.
+///    belongs to. This column is indexed in order to ease the selection of
+///    an author's books. A foreign key is defined from `book.authorId`
+///    column to `author.id`, so that SQLite guarantees that no book refers
+///    to a missing author. The `onDelete: .cascade` option has SQLite
+///    automatically delete all of an author's books when that author is
+///    deleted. See for
+///    more information.
 /// 3. Make the `book.authorId` column not null if you want SQLite to guarantee
 ///    that all books have an author.
-/// 4. Create an index on the `book.authorId` column in order to ease the
-///    selection of an author's books.
-/// 5. Create a foreign key from `book.authorId` column to `author.id`, so that
-///    SQLite guarantees that no book refers to a missing author. The
-///    `onDelete: .cascade` option has SQLite automatically delete all of an
-///    author's books when that author is deleted.
-///    See for more information.
 ///
 /// The example above uses auto-incremented primary keys. But generally
 /// speaking, all primary keys are supported.
@@ -77,7 +73,7 @@ public struct HasManyAssociation<Origin, Destination> {
             originIsLeft: false)
         
         let associationKey: SQLAssociationKey
-        if let key = key {
+        if let key {
             associationKey = .fixedPlural(key)
         } else {
             associationKey = .inflected(destinationTable)
diff --git a/GRDB/QueryInterface/Request/Association/HasOneAssociation.swift b/GRDB/QueryInterface/Request/Association/HasOneAssociation.swift
index 36f9e7708c..16636e1388 100644
--- a/GRDB/QueryInterface/Request/Association/HasOneAssociation.swift
+++ b/GRDB/QueryInterface/Request/Association/HasOneAssociation.swift
@@ -29,10 +29,9 @@
 ///     }
 ///     try db.create(table: "demographics") { t in
 ///         t.autoIncrementedPrimaryKey("id")
-///         t.column("countryCode", .text)                 // (2)
+///         t.belongsTo("country", onDelete: .cascade)     // (2)
 ///             .notNull()                                 // (3)
 ///             .unique()                                  // (4)
-///             .references("country", onDelete: .cascade) // (5)
 ///         t.column("population", .integer)
 ///         t.column("density", .double)
 ///     }
@@ -40,16 +39,17 @@
 ///
 /// 1. The country table has a primary key.
 /// 2. The `demographics.countryCode` column is used to link a demographic
-///    profile to the country it belongs to.
+///    profile to the country it belongs to.
This column is indexed in order +/// to ease the selection of the demographics of a country. A foreign key +/// is defined from `demographics.countryCode` column to `country.code`, +/// so that SQLite guarantees that no profile refers to a missing +/// country. The `onDelete: .cascade` option has SQLite automatically +/// delete a profile when its country is deleted. See +/// for more information. /// 3. Make the `demographics.countryCode` column not null if you want SQLite to /// guarantee that all profiles are linked to a country. /// 4. Create a unique index on the `demographics.countryCode` column in order /// to guarantee the unicity of any country's demographics. -/// 5. Create a foreign key from `demographics.countryCode` column to -/// `country.code`, so that SQLite guarantees that no profile refers to a -/// missing country. The `onDelete: .cascade` option has SQLite automatically -/// delete a profile when its country is deleted. -/// See for more information. /// /// The example above uses a string primary for the country table. But generally /// speaking, all primary keys are supported. @@ -80,7 +80,7 @@ public struct HasOneAssociation { originIsLeft: false) let associationKey: SQLAssociationKey - if let key = key { + if let key { associationKey = .fixedSingular(key) } else { associationKey = .inflected(destinationTable) diff --git a/GRDB/QueryInterface/Request/CommonTableExpression.swift b/GRDB/QueryInterface/Request/CommonTableExpression.swift index 813677ebe2..17d192fb2a 100644 --- a/GRDB/QueryInterface/Request/CommonTableExpression.swift +++ b/GRDB/QueryInterface/Request/CommonTableExpression.swift @@ -334,7 +334,7 @@ struct SQLCTE { /// The number of columns in the common table expression. func columnCount(_ db: Database) throws -> Int { - if let columns = columns { + if let columns { // No need to hit the database return columns.count } diff --git a/GRDB/QueryInterface/Request/QueryInterfaceRequest.swift b/GRDB/QueryInterface/Request/QueryInterfaceRequest.swift index e3415ff5aa..c573a2f3c6 100644 --- a/GRDB/QueryInterface/Request/QueryInterfaceRequest.swift +++ b/GRDB/QueryInterface/Request/QueryInterfaceRequest.swift @@ -100,8 +100,13 @@ extension QueryInterfaceRequest: FetchRequest { let associations = relation.prefetchedAssociations if associations.isEmpty == false { // Eager loading of prefetched associations - preparedRequest.supplementaryFetch = { [relation] db, rows in - try prefetch(db, associations: associations, from: relation, into: rows) + preparedRequest.supplementaryFetch = { [relation] db, rows, willExecuteSupplementaryRequest in + try prefetch( + db, + associations: associations, + from: relation, + into: rows, + willExecuteSupplementaryRequest: willExecuteSupplementaryRequest) } } return preparedRequest @@ -339,6 +344,12 @@ extension QueryInterfaceRequest: OrderedRequest { $0.relation = $0.relation.unordered() } } + + public func withStableOrder() -> QueryInterfaceRequest { + with { + $0.relation = $0.relation.withStableOrder() + } + } } extension QueryInterfaceRequest: AggregatingRequest { @@ -436,7 +447,7 @@ extension QueryInterfaceRequest: DerivableRequest { } } - public func with(_ cte: CommonTableExpression) -> Self { + public func with(_ cte: CommonTableExpression) -> Self { with { $0.relation.ctes[cte.tableName] = cte.cte } @@ -533,7 +544,7 @@ extension QueryInterfaceRequest { return try SQLQueryGenerator(relation: relation).makeDeleteStatement(db, selection: selection) } - /// Returns a cursor over the record deleted by a + /// Returns a cursor 
over the records deleted by a /// `DELETE RETURNING` statement. /// /// For example: @@ -553,7 +564,7 @@ extension QueryInterfaceRequest { /// . /// /// - parameter db: A database connection. - /// - returns: A ``RecordCursor`` over deleted records. + /// - returns: A ``RecordCursor`` over the deleted records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. public func deleteAndFetchCursor(_ db: Database) throws -> RecordCursor @@ -636,7 +647,7 @@ extension QueryInterfaceRequest { /// - returns: A prepared statement. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// - precondition: `selection` is not empty. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func deleteAndFetchStatement( _ db: Database, selection: [any SQLSelectable]) @@ -646,7 +657,7 @@ extension QueryInterfaceRequest { return try SQLQueryGenerator(relation: relation).makeDeleteStatement(db, selection: selection) } - /// Returns a cursor over the record deleted by a + /// Returns a cursor over the records deleted by a /// `DELETE RETURNING` statement. /// /// For example: @@ -666,9 +677,9 @@ extension QueryInterfaceRequest { /// . /// /// - parameter db: A database connection. - /// - returns: A ``RecordCursor`` over deleted records. + /// - returns: A ``RecordCursor`` over the deleted records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func deleteAndFetchCursor(_ db: Database) throws -> RecordCursor where RowDecoder: FetchableRecord & TableRecord @@ -696,7 +707,7 @@ extension QueryInterfaceRequest { /// - parameter db: A database connection. /// - returns: An array of deleted records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func deleteAndFetchAll(_ db: Database) throws -> [RowDecoder] where RowDecoder: FetchableRecord & TableRecord @@ -723,7 +734,7 @@ extension QueryInterfaceRequest { /// - parameter db: A database connection. /// - returns: A set of deleted records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func deleteAndFetchSet(_ db: Database) throws -> Set where RowDecoder: FetchableRecord & TableRecord & Hashable @@ -865,7 +876,7 @@ extension QueryInterfaceRequest { return updateStatement } - /// Returns a cursor over the record updated by an + /// Returns a cursor over the records updated by an /// `UPDATE RETURNING` statement. /// /// For example: @@ -995,7 +1006,7 @@ extension QueryInterfaceRequest { /// - returns: A prepared statement. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// - precondition: `selection` and `assignments` are not empty. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetchStatement( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? 
= nil, @@ -1018,7 +1029,7 @@ extension QueryInterfaceRequest { return updateStatement } - /// Returns a cursor over the record updated by an + /// Returns a cursor over the records updated by an /// `UPDATE RETURNING` statement. /// /// For example: @@ -1043,7 +1054,7 @@ extension QueryInterfaceRequest { /// - returns: A cursor over the updated records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// - precondition: `assignments` is not empty. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetchCursor( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -1081,7 +1092,7 @@ extension QueryInterfaceRequest { /// - returns: An array of updated records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// - precondition: `assignments` is not empty. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetchAll( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -1114,7 +1125,7 @@ extension QueryInterfaceRequest { /// - returns: A set of updated records. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// - precondition: `assignments` is not empty. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetchSet( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -1279,6 +1290,90 @@ extension ColumnExpression { public static func /= (column: Self, value: some SQLExpressible) -> ColumnAssignment { column.set(to: column / value) } + + /// Creates an assignment that applies a bitwise and. + /// + /// For example: + /// + /// ```swift + /// Column("mask") &= 2 + /// Column("mask") &= Column("other") + /// ``` + /// + /// Usage: + /// + /// ```swift + /// try dbQueue.write { db in + /// // UPDATE player SET score = score & 2 + /// try Player.updateAll(db, Column("mask") &= 2) + /// } + /// ``` + public static func &= (column: Self, value: some SQLExpressible) -> ColumnAssignment { + column.set(to: column & value) + } + + /// Creates an assignment that applies a bitwise or. + /// + /// For example: + /// + /// ```swift + /// Column("mask") |= 2 + /// Column("mask") |= Column("other") + /// ``` + /// + /// Usage: + /// + /// ```swift + /// try dbQueue.write { db in + /// // UPDATE player SET score = score | 2 + /// try Player.updateAll(db, Column("mask") |= 2) + /// } + /// ``` + public static func |= (column: Self, value: some SQLExpressible) -> ColumnAssignment { + column.set(to: column | value) + } + + /// Creates an assignment that applies a bitwise left shift. + /// + /// For example: + /// + /// ```swift + /// Column("mask") <<= 2 + /// Column("mask") <<= Column("other") + /// ``` + /// + /// Usage: + /// + /// ```swift + /// try dbQueue.write { db in + /// // UPDATE player SET score = score << 2 + /// try Player.updateAll(db, Column("mask") <<= 2) + /// } + /// ``` + public static func <<= (column: Self, value: some SQLExpressible) -> ColumnAssignment { + column.set(to: column << value) + } + + /// Creates an assignment that applies a bitwise right shift. 
+ /// + /// For example: + /// + /// ```swift + /// Column("mask") >>= 2 + /// Column("mask") >>= Column("other") + /// ``` + /// + /// Usage: + /// + /// ```swift + /// try dbQueue.write { db in + /// // UPDATE player SET score = score >> 2 + /// try Player.updateAll(db, Column("mask") >>= 2) + /// } + /// ``` + public static func >>= (column: Self, value: some SQLExpressible) -> ColumnAssignment { + column.set(to: column >> value) + } } // MARK: - Eager loading of hasMany associations @@ -1290,11 +1385,14 @@ extension ColumnExpression { /// - parameter associations: Prefetched associations. /// - parameter originRows: The rows that need to be extended with prefetched rows. /// - parameter originQuery: The query that was used to fetch `originRows`. +/// - parameter willExecuteSupplementaryRequest: A closure executed before a +/// supplementary fetch is performed. private func prefetch( _ db: Database, associations: [_SQLAssociation], from originRelation: SQLRelation, - into originRows: [Row]) throws + into originRows: [Row], + willExecuteSupplementaryRequest: WillExecuteSupplementaryRequest?) throws { guard let firstOriginRow = originRows.first else { // No rows -> no prefetch @@ -1355,7 +1453,7 @@ private func prefetch( // useless, and we only need to select pivot columns: let originRelation = originRelation .unorderedUnlessLimited() // only preserve ordering in the CTE if limited - .removingChildrenForPrefetchedAssociations() + .removingPrefetchedAssociations() .selectOnly(leftColumns.map { SQLExpression.column($0).sqlSelection }) let originCTE = CommonTableExpression( named: "grdb_base", @@ -1389,6 +1487,10 @@ private func prefetch( annotatedWith: pivotColumns) } + if let willExecuteSupplementaryRequest { + // Support for `Database.dumpRequest` + try willExecuteSupplementaryRequest(.init(prefetchRequest), association.keyPath) + } let prefetchedRows = try prefetchRequest.fetchAll(db) let prefetchedGroups = prefetchedRows.grouped(byDatabaseValuesOnColumns: pivotColumns.map { "grdb_\($0)" }) let groupingIndexes = firstOriginRow.indexes(forColumns: leftColumns) diff --git a/GRDB/QueryInterface/Request/RequestProtocols.swift b/GRDB/QueryInterface/Request/RequestProtocols.swift index 553511e196..def5c2f52c 100644 --- a/GRDB/QueryInterface/Request/RequestProtocols.swift +++ b/GRDB/QueryInterface/Request/RequestProtocols.swift @@ -214,6 +214,7 @@ extension SelectionRequest { /// /// ### The WHERE and JOIN ON Clauses /// +/// - ``all()`` /// - ``filter(_:)`` /// - ``filter(literal:)`` /// - ``filter(sql:arguments:)`` @@ -291,6 +292,13 @@ extension FilteredRequest { public func none() -> Self { filterWhenConnected { _ in false } } + + /// Returns `self`: a request that fetches all rows from this request. + /// + /// This method, which does nothing, exists in order to match ``none()``. + public func all() -> Self { + self + } } // MARK: - TableRequest @@ -316,7 +324,8 @@ extension FilteredRequest { /// - ``filter(key:)-2te6v`` /// - ``filter(keys:)-6ggt1`` /// - ``filter(keys:)-8fbn9`` -/// - ``matching(_:)`` +/// - ``matching(_:)-3s3zr`` +/// - ``matching(_:)-7c1e8`` /// /// ### The GROUP BY and HAVING Clauses /// @@ -433,7 +442,11 @@ extension TableRequest where Self: FilteredRequest, Self: TypedRequest { // make it impractical to define `filter(id:)`, `fetchOne(_:key:)`, // `deleteAll(_:ids:)` etc. if let recordType = RowDecoder.self as? 
any EncodableRecord.Type { - if Sequence.Element.self == Date.self || Sequence.Element.self == Optional.self { + if Sequence.Element.self == Data.self || Sequence.Element.self == Optional.self { + let strategy = recordType.databaseDataEncodingStrategy + let keys = keys.compactMap { ($0 as! Data?).flatMap(strategy.encode)?.databaseValue } + return filter(rawKeys: keys) + } else if Sequence.Element.self == Date.self || Sequence.Element.self == Optional.self { let strategy = recordType.databaseDateEncodingStrategy let keys = keys.compactMap { ($0 as! Date?).flatMap(strategy.encode)?.databaseValue } return filter(rawKeys: keys) @@ -570,7 +583,7 @@ extension TableRequest where Self: FilteredRequest, Self: TypedRequest { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRequest where Self: FilteredRequest, Self: TypedRequest, @@ -892,6 +905,7 @@ extension AggregatingRequest { /// - ``orderWhenConnected(_:)`` /// - ``reversed()`` /// - ``unordered()`` +/// - ``withStableOrder()`` public protocol OrderedRequest { /// Sorts the fetched rows according to the given SQL ordering terms. /// @@ -952,6 +966,14 @@ public protocol OrderedRequest { /// .unordered() /// ``` func unordered() -> Self + + /// Returns a request with a stable order. + /// + /// The returned request lifts ordering ambiguities and always return + /// its results in the same order. + /// + /// The purpose of this method is to make requests testable. + func withStableOrder() -> Self } extension OrderedRequest { @@ -1327,6 +1349,7 @@ extension JoinableRequest where Self: SelectionRequest { /// /// ### The WHERE Clause /// +/// - ``FilteredRequest/all()`` /// - ``FilteredRequest/filter(_:)`` /// - ``TableRequest/filter(id:)`` /// - ``TableRequest/filter(ids:)`` @@ -1337,7 +1360,8 @@ extension JoinableRequest where Self: SelectionRequest { /// - ``FilteredRequest/filter(literal:)`` /// - ``FilteredRequest/filter(sql:arguments:)`` /// - ``FilteredRequest/filterWhenConnected(_:)`` -/// - ``TableRequest/matching(_:)`` +/// - ``TableRequest/matching(_:)-3s3zr`` +/// - ``TableRequest/matching(_:)-7c1e8`` /// - ``FilteredRequest/none()`` /// /// ### The GROUP BY and HAVING Clauses @@ -1363,6 +1387,7 @@ extension JoinableRequest where Self: SelectionRequest { /// - ``TableRequest/orderByPrimaryKey()`` /// - ``OrderedRequest/reversed()`` /// - ``OrderedRequest/unordered()`` +/// - ``OrderedRequest/withStableOrder()`` /// /// ### Associations /// diff --git a/GRDB/QueryInterface/SQL/Column.swift b/GRDB/QueryInterface/SQL/Column.swift index 331789d453..535166789e 100644 --- a/GRDB/QueryInterface/SQL/Column.swift +++ b/GRDB/QueryInterface/SQL/Column.swift @@ -9,7 +9,8 @@ /// ### Deriving SQL Expressions /// /// - ``detached`` -/// - ``match(_:)`` +/// - ``match(_:)-727nk`` +/// - ``match(_:)-1vvo8`` /// /// ### Creating Column Assignments /// @@ -26,6 +27,7 @@ public protocol ColumnExpression: SQLSpecificExpressible { } extension ColumnExpression { + /// Returns an SQL column. public var sqlExpression: SQLExpression { .column(name) } @@ -64,17 +66,33 @@ extension ColumnExpression where Self == Column { /// A column in a database table. 
/// +/// For example: +/// +/// ```swift +/// struct Player: TableRecord { +/// var score: Int +/// } +/// +/// let maximumScore = try dbQueue.read { db in +/// // SELECT MAX(score) FROM player +/// try Player +/// .select(max(Column("score")), as: Int.self) +/// .fetchOne(db) +/// } +/// ``` +/// /// ## Topics /// /// ### Standard Columns /// /// - ``rowID-3bn70`` +/// - ``rank`` /// /// ### Creating A Column /// /// - ``init(_:)-5grmu`` /// - ``init(_:)-7xc4z`` -public struct Column { +public struct Column: Sendable { /// The hidden rowID column. public static let rowID = Column("rowid") diff --git a/GRDB/QueryInterface/SQL/DatabasePromise.swift b/GRDB/QueryInterface/SQL/DatabasePromise.swift index 815e15f777..9d497b5846 100644 --- a/GRDB/QueryInterface/SQL/DatabasePromise.swift +++ b/GRDB/QueryInterface/SQL/DatabasePromise.swift @@ -38,7 +38,13 @@ struct DatabasePromise { /// Returns a promise whose value is transformed by the given closure. func map(_ transform: @escaping (T) throws -> U) -> DatabasePromise { DatabasePromise { db in - try transform(self.resolve(db)) + try transform(resolve(db)) } } } + +extension DatabasePromise: CustomStringConvertible { + var description: String { + "DatabasePromise<\(T.self)>" + } +} diff --git a/GRDB/QueryInterface/SQL/SQLAssociation.swift b/GRDB/QueryInterface/SQL/SQLAssociation.swift index aa106a319e..4d9a8d30c7 100644 --- a/GRDB/QueryInterface/SQL/SQLAssociation.swift +++ b/GRDB/QueryInterface/SQL/SQLAssociation.swift @@ -120,10 +120,10 @@ public struct _SQLAssociation { let reversedSteps = zip(steps, steps.dropFirst()) .map { (step, nextStep) in // Intermediate steps are not selected, and including(all:) - // children are useless: + // children can't impact the destination relation: let relation = step.relation .selectOnly([]) - .removingChildrenForPrefetchedAssociations() + .removingPrefetchedAssociations() // Don't interfere with user-defined keys that could be added later let key = step.key.with { diff --git a/GRDB/QueryInterface/SQL/SQLExpression.swift b/GRDB/QueryInterface/SQL/SQLExpression.swift index e9de5892d5..ad772c8dff 100644 --- a/GRDB/QueryInterface/SQL/SQLExpression.swift +++ b/GRDB/QueryInterface/SQL/SQLExpression.swift @@ -37,6 +37,25 @@ public struct SQLExpression { private var impl: Impl + /// The preferred interpretation of the expression in JSON + /// building contexts (see `jsonBuilderExpression`). + /// + /// ```swift + /// // Considering: + /// // JSON_ARRAY('[1, 2, 3]') → '["[1, 2, 3]"]' + /// // JSON_ARRAY(JSON('[1, 2, 3]')) → [[1,2,3]] + /// + /// // Compare an expression with preferredJSONInterpretation = .unspecified: + /// // JSON_ARRAY("info") + /// Database.jsonArray([Column("info")]) + /// + /// // ...with an expression with preferredJSONInterpretation = .jsonValue: + /// // JSON_ARRAY(JSON("info")) + /// Database.jsonArray([Column("info").jsonValue]) + /// Database.jsonArray([JSONColumn("info")]) + /// ``` + var preferredJSONInterpretation = JSONInterpretation.deferredToSQLite + /// The private implementation of the public `SQLExpression`. private enum Impl { /// A column. @@ -71,6 +90,11 @@ public struct SQLExpression { /// A literal SQL expression case literal(SQL) + /// The `CAST(expr AS storage-class)` expression. + /// + /// See . + indirect case cast(SQLExpression, Database.StorageClass) + /// The `BETWEEN` and `NOT BETWEEN` operators. /// /// BETWEEN AND @@ -146,8 +170,14 @@ public struct SQLExpression { /// /// (, ...) 
/// (DISTINCT ) - case function(String, aggregate: Bool, distinct: Bool, arguments: [SQLExpression]) + indirect case simpleFunction(SQLSimpleFunctionInvocation) + /// An aggregate function call. + /// + /// (, ...) + /// (DISTINCT ) + indirect case aggregateFunction(SQLAggregateFunctionInvocation) + /// An expression that checks for zero or positive values. /// /// = 0 @@ -178,6 +208,93 @@ public struct SQLExpression { /// - For views, it is true iff any column is not null. /// - For CTEs, it is not implemented yet. case qualifiedExists(TableAlias, isNegated: Bool) + + /// Returns a qualified expression + func qualified(with alias: TableAlias) -> Impl { + switch self { + case .databaseValue, + .qualifiedColumn, + .qualifiedFastPrimaryKey, + .qualifiedExists, + .subquery, + .exists: + return self + + case let .column(name): + return .qualifiedColumn(name, alias) + + case let .rowValue(expressions): + return .rowValue(expressions.map { $0.qualified(with: alias) }) + + case let .literal(sqlLiteral): + return .literal(sqlLiteral.qualified(with: alias)) + + case let .cast(expression, storageClass): + return .cast(expression.qualified(with: alias), storageClass) + + case let .between( + expression: expression, + lowerBound: lowerBound, + upperBound: upperBound, + isNegated: isNegated): + + return .between( + expression: expression.qualified(with: alias), + lowerBound: lowerBound.qualified(with: alias), + upperBound: upperBound.qualified(with: alias), + isNegated: isNegated) + + case let .binary(op, lhs, rhs): + return .binary(op, lhs.qualified(with: alias), rhs.qualified(with: alias)) + + case let .escapableBinary(op, lhs, rhs, escape): + return .escapableBinary( + op, + lhs.qualified(with: alias), + rhs.qualified(with: alias), + escape: escape?.qualified(with: alias)) + + case let .associativeBinary(op, expressions): + return .associativeBinary(op, expressions.map { $0.qualified(with: alias) }) + + case let .in(expression, collection, isNegated: isNegated): + return .in( + expression.qualified(with: alias), + collection.qualified(with: alias), + isNegated: isNegated + ) + + case let .unary(op, expression): + return .unary(op, expression.qualified(with: alias)) + + case let .compare(op, lhs, rhs): + return .compare(op, lhs.qualified(with: alias), rhs.qualified(with: alias)) + + case let .tableMatch(a, expression): + return .tableMatch(a, expression.qualified(with: alias)) + + case let .not(expression): + return .not(expression.qualified(with: alias)) + + case let .collated(expression, collationName): + return .collated(expression.qualified(with: alias), collationName) + + case .countAll: + return .countAll + + case let .simpleFunction(invocation): + return .simpleFunction(invocation.qualified(with: alias)) + + case let .aggregateFunction(invocation): + return .aggregateFunction(invocation.qualified(with: alias)) + + case let .isEmpty(expression, isNegated: isNegated): + return .isEmpty(expression.qualified(with: alias), isNegated: isNegated) + + case .fastPrimaryKey: + return .qualifiedFastPrimaryKey(alias) + } + } } /// `BooleanTest` supports truthiness tests. @@ -206,7 +323,7 @@ public struct SQLExpression { /// 1000.databaseValue] /// let request = Player.select(values.joined(operator: .add)) /// ``` - public struct AssociativeBinaryOperator: Hashable { + public struct AssociativeBinaryOperator: Hashable, Sendable { /// The SQL operator let sql: String @@ -215,7 +332,7 @@ public struct SQLExpression { /// If true, (a • b) • c is strictly equal to a • (b • c). 
/// -/// `AND`, `OR`, `||` (concat) are stricly associative. +/// `AND`, `OR`, `||` (concat), `&`, and `|` are strictly associative. /// /// `+` and `*` are not strictly associative when applied to floating /// point values. @@ -226,7 +343,7 @@ public struct SQLExpression { /// /// `||` (concat) is bijective. /// - /// `AND`, `OR`, `+` and `*` are not. + /// `AND`, `OR`, `+`, `*`, `&`, and `|` are not. let isBijective: Bool /// Creates a binary operator @@ -306,6 +423,34 @@ public struct SQLExpression { neutralValue: "".databaseValue, strictlyAssociative: true, bijective: true) + + /// The `&` bitwise AND SQL operator. + /// + /// For example: + /// + /// ```swift + /// // mask & 2 + /// [Column("mask"), 2.databaseValue].joined(operator: .bitwiseAnd) + /// ``` + public static let bitwiseAnd = AssociativeBinaryOperator( + sql: "&", + neutralValue: (-1).databaseValue, + strictlyAssociative: true, + bijective: false) + + /// The `|` bitwise OR SQL operator. + /// + /// For example: + /// + /// ```swift + /// // mask | 2 + /// [Column("mask"), 2.databaseValue].joined(operator: .bitwiseOr) + /// ``` + public static let bitwiseOr = AssociativeBinaryOperator( + sql: "|", + neutralValue: 0.databaseValue, + strictlyAssociative: true, + bijective: false) } /// `BinaryOperator` is an SQLite binary operator, such as `>`, `=`, etc. @@ -316,11 +461,22 @@ public struct SQLExpression { /// The SQL operator let sql: String - /// Creates a binary operator + /// A boolean value indicating if the operator is known to return a + /// JSON value. /// - /// BinaryOperator("-") - init(_ sql: String) { + /// A false value does not provide any information. + let isJSONValue: Bool + + /// Creates a binary operator. + /// + /// For example: + /// + /// ``` + /// BinaryOperator("-") + /// ``` + init(_ sql: String, isJSONValue: Bool = false) { self.sql = sql + self.isJSONValue = isJSONValue } /// The `<` binary operator @@ -343,6 +499,28 @@ public struct SQLExpression { /// The `MATCH` binary operator static let match = BinaryOperator("MATCH") + + /// The `<<` bitwise left shift operator + static let leftShift = BinaryOperator("<<") + + /// The `>>` bitwise right shift operator + static let rightShift = BinaryOperator(">>") + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + /// The `->` SQL operator + static let jsonExtractJSON = BinaryOperator("->", isJSONValue: true) + + /// The `->>` SQL operator + static let jsonExtractSQL = BinaryOperator("->>") +#else + /// The `->` SQL operator + @available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) // SQLite 3.38+ + static let jsonExtractJSON = BinaryOperator("->", isJSONValue: true) + + /// The `->>` SQL operator + @available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) // SQLite 3.38+ + static let jsonExtractSQL = BinaryOperator("->>") +#endif } /// `EscapableBinaryOperator` is an SQLite binary operator that accepts an @@ -399,19 +577,46 @@ public struct SQLExpression { /// The SQL operator let sql: String - /// If true GRDB puts a white space between the operator and the operand. - let needsRightSpace: Bool - - /// Creates an unary operator - /// - /// UnaryOperator("~", needsRightSpace: false) - init(_ sql: String, needsRightSpace: Bool) { + /// Creates a unary operator.
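+    ///
+    /// For example:
+    ///
+    /// ```
+    /// UnaryOperator("~")
+    /// ```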
+ init(_ sql: String) { self.sql = sql - self.needsRightSpace = needsRightSpace } /// The `-` unary operator - static let minus = UnaryOperator("-", needsRightSpace: false) + static let minus = UnaryOperator("-") + + /// The `~` unary operator + static let bitwiseNot = UnaryOperator("~") + } + + /// Describes the interpretation of an expression in a JSON + /// building context. + enum JSONInterpretation { + /// JSON interpretation is deferred to SQLite: + /// + /// ```swift + /// // JSON_ARRAY('[1, 2, 3]') → '["[1, 2, 3]"]' + /// Database.jsonArray(["[1, 2, 3]"]) + /// + /// // JSON_ARRAY(JSON('[1, 2, 3]')) → '[[1, 2, 3]]' + /// Database.jsonArray([Database.json("[1, 2, 3]")]) + /// + /// // JSON_ARRAY("info") + /// Database.jsonArray([Column("info")]) + /// ``` + case deferredToSQLite + + /// Expression is interpreted as a JSON value: + /// + /// ```swift + /// // JSON_ARRAY(JSON('[1, 2, 3]')) → '[[1, 2, 3]]' + /// Database.jsonArray(["[1, 2, 3]"].jsonValue) + /// + /// // JSON_ARRAY(JSON("info")) + /// Database.jsonArray([Column("info").jsonValue]) + /// Database.jsonArray([JSONColumn("info")]) + /// ``` + case jsonValue } } @@ -725,48 +930,166 @@ extension SQLExpression { // MARK: Functions + // TODO: add missing pure functions: + // https://www.sqlite.org/lang_aggfunc.html + // https://www.sqlite.org/lang_datefunc.html + // https://www.sqlite.org/lang_mathfunc.html + private static let knownPureFunctions: Set = [ + "ABS", + "CHAR", + "COALESCE", + "GLOB", + "HEX", + "IFNULL", + "IIF", + "INSTR", + "JSON", + "JSON_ARRAY", + "JSON_GROUP_ARRAY", + "JSON_GROUP_OBJECT", + "JSON_INSERT", + "JSON_OBJECT", + "JSON_PATCH", + "JSON_REMOVE", + "JSON_REPLACE", + "JSON_SET", + "JSON_QUOTE", + "LENGTH", + "LIKE", + "LIKELIHOOD", + "LIKELY", + "LOAD_EXTENSION", + "LOWER", + "LTRIM", + "NULLIF", + "PRINTF", + "QUOTE", + "REPLACE", + "ROUND", + "RTRIM", + "SOUNDEX", + "SQLITE_COMPILEOPTION_GET", + "SQLITE_COMPILEOPTION_USED", + "SQLITE_SOURCE_ID", + "SQLITE_VERSION", + "SUBSTR", + "TRIM", + "TYPEOF", + "UNICODE", + "UNLIKELY", + "UPPER", + "ZEROBLOB", + ] + + private static let knownAggregateFunctions: Set = [ + "AVG", + "COUNT", + "GROUP_CONCAT", + "JSON_GROUP_ARRAY", + "JSON_GROUP_OBJECT", + "MAX", // when single argument + "MIN", // when single argument + "SUM", + "TOTAL", + ] + + private static let knownFunctionsReturningJSONValue: Set = [ + "JSON", + "JSON_ARRAY", + "JSON_GROUP_ARRAY", + "JSON_GROUP_OBJECT", + "JSON_INSERT", + "JSON_OBJECT", + "JSON_PATCH", + "JSON_REMOVE", + "JSON_REPLACE", + "JSON_SET", + "JSON_QUOTE", + ] + /// The `COUNT(*)` expression. static let countAll = SQLExpression(impl: .countAll) - /// The `COUNT` function. - /// - /// COUNT() - static func count(_ expression: SQLExpression) -> Self { - aggregate("COUNT", [expression]) + /// A function call. 
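+    ///
+    /// Names listed in `knownAggregateFunctions` (and single-argument
+    /// `MAX`/`MIN`) are routed to an aggregate invocation; all other
+    /// names, including multi-argument `MAX`/`MIN`, build a simple
+    /// (scalar) function invocation.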
+ static func function(_ functionName: String, _ arguments: [SQLExpression]) -> Self { + let name = functionName.uppercased() + + if (name == "MAX" || name == "MIN") && arguments.count > 1 { + return .simpleFunction( + functionName, + arguments, + isPure: true, + isJSONValue: false) + + } else if Self.knownAggregateFunctions.contains(name) { + return .aggregateFunction( + functionName, + arguments, + isJSONValue: Self.knownFunctionsReturningJSONValue.contains(name)) + + } else { + let isJSONValue: Bool + if name == "JSON_EXTRACT" && arguments.count > 2 { + isJSONValue = true + } else { + isJSONValue = Self.knownFunctionsReturningJSONValue.contains(name) + } + + return .simpleFunction( + functionName, arguments, + isPure: Self.knownPureFunctions.contains(name), + isJSONValue: isJSONValue) + } } - /// The `COUNT(DISTINCT)` function. - /// - /// COUNT(DISTINCT ) - static func countDistinct(_ expression: SQLExpression) -> Self { - distinctAggregate("COUNT", expression) + /// A simple function call. + /// + /// - warning: Don't use this method for aggregate functions! + static func simpleFunction( + _ name: String, + _ arguments: [SQLExpression], + isPure: Bool = false, + isJSONValue: Bool = false) + -> Self + { + .init(impl: .simpleFunction(SQLSimpleFunctionInvocation( + name: name, + arguments: arguments, + isPure: isPure, + isJSONValue: isJSONValue))) } - /// A function call. - /// - /// (, ...) - /// - /// - warning: for aggregate functions, call one of: - /// - `SQLExpression.aggregate(_:_:)`, - /// - `SQLExpression.distinctAggregate(_:_:)`, - /// - `SQLExpression.countDistinct(_:)` - /// - `SQLExpression.countAll`. - static func function(_ name: String, _ arguments: [SQLExpression]) -> Self { - self.init(impl: .function(name, aggregate: false, distinct: false, arguments: arguments)) + /// An aggregate function call. + static func aggregateFunction( + _ name: String, + _ arguments: [SQLExpression], + isDistinct: Bool = false, + ordering: SQLOrdering? = nil, + filter: SQLExpression? = nil, + isJSONValue: Bool = false) + -> Self + { + .init(impl: .aggregateFunction(.init( + name: name, + arguments: arguments, + isDistinct: isDistinct, + ordering: ordering, + filter: filter, + isJSONValue: isJSONValue))) } - /// An aggregate function call. + /// The `COUNT` function. /// - /// (, ...) - static func aggregate(_ name: String, _ arguments: [SQLExpression]) -> Self { - self.init(impl: .function(name, aggregate: true, distinct: false, arguments: arguments)) + /// COUNT() + static func count(_ expression: SQLExpression) -> Self { + function("COUNT", [expression]) } - /// A distinct aggregate function call. + /// The `COUNT(DISTINCT)` function. /// - /// (DISTINCT ) - static func distinctAggregate(_ name: String, _ argument: SQLExpression) -> Self { - self.init(impl: .function(name, aggregate: true, distinct: true, arguments: [argument])) + /// COUNT(DISTINCT ) + static func countDistinct(_ expression: SQLExpression) -> Self { + aggregateFunction("COUNT", [expression], isDistinct: true) } /// An expression that checks for zero or positive values. @@ -777,6 +1100,13 @@ extension SQLExpression { self.init(impl: .isEmpty(expression, isNegated: isNegated)) } + /// The `CAST(expr AS storage-class)` expression. + /// + /// See . + static func cast(_ expression: SQLExpression, as storageClass: Database.StorageClass) -> Self { + self.init(impl: .cast(expression, storageClass)) + } + // MARK: Deferred // TODO: replace with something that can work for WITHOUT ROWID table with a multi-columns primary key. 
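To make the factory's routing concrete, here is a minimal user-level sketch (not part of the patch). It assumes a `dbQueue`, a `player` table with a `score` column, and the `Player` record from the doc examples above; the `cast` free function is the one introduced later in this patch:

```swift
import GRDB

struct Player: TableRecord { }

try dbQueue.read { db in
    // Single-argument MAX is routed to an aggregate invocation:
    // SELECT MAX("score") FROM "player"
    let best = try Player
        .select(max(Column("score")), as: Int.self)
        .fetchOne(db)

    // CAST now has a dedicated expression:
    // SELECT CAST("score" AS REAL) FROM "player"
    let ratios = try Player
        .select(cast(Column("score"), as: .real), as: Double.self)
        .fetchAll(db)

    print(best ?? 0, ratios)
}
```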
@@ -812,12 +1142,27 @@ extension SQLExpression { } } +// MARK: - Deriving Expressions + +extension SQLExpression { + /// Returns a qualified expression + func qualified(with alias: TableAlias) -> Self { + .init(impl: impl.qualified(with: alias), preferredJSONInterpretation: preferredJSONInterpretation) + } + + func withPreferredJSONInterpretation(_ interpretation: JSONInterpretation) -> Self { + .init(impl: impl, preferredJSONInterpretation: interpretation) + } +} + +// MARK: - Expressions Information + extension SQLExpression { /// The expression as a quoted SQL literal (not public in order to avoid abuses) /// /// try "foo'bar".databaseValue.quotedSQL(db) // "'foo''bar'"" func quotedSQL(_ db: Database) throws -> String { - let context = SQLGenerationContext(db, argumentsSink: .forRawSQL) + let context = SQLGenerationContext(db, argumentsSink: .literalValues) return try sql(context) } @@ -875,18 +1220,8 @@ extension SQLExpression { case let .collated(expression, _): return try expression.column(db, for: alias, acceptsBijection: acceptsBijection) - case let .function(name, aggregate: false, distinct: false, arguments: arguments): - guard acceptsBijection else { - return nil - } - let name = name.uppercased() - if ["HEX", "QUOTE"].contains(name) && arguments.count == 1 { - return try arguments[0].column(db, for: alias, acceptsBijection: acceptsBijection) - } else if name == "IFNULL" && arguments.count == 2 && arguments[1].isConstantInRequest { - return try arguments[0].column(db, for: alias, acceptsBijection: acceptsBijection) - } else { - return nil - } + case let .simpleFunction(invocation) where acceptsBijection: + return try invocation.column(db, for: alias) case let .qualifiedFastPrimaryKey(a): if alias == a { @@ -949,6 +1284,9 @@ extension SQLExpression { } return resultSQL + case let .cast(expression, storageClass): + return try "CAST(\(expression.sql(context, wrappedInParenthesis: false)) AS \(storageClass.rawValue))" + case let .between(expression: expression, lowerBound: lowerBound, upperBound: upperBound, isNegated: isNegated): var resultSQL = try """ \(expression.sql(context, wrappedInParenthesis: true)) \ @@ -979,7 +1317,7 @@ extension SQLExpression { \(op.sql) \ \(rhs.sql(context, wrappedInParenthesis: true)) """ - if let escape = escape { + if let escape { resultSQL += try " ESCAPE \(escape.sql(context, wrappedInParenthesis: true))" } if wrappedInParenthesis { @@ -1021,9 +1359,7 @@ extension SQLExpression { return resultSQL case let .unary(op, expression): - var resultSQL = try op.sql - + (op.needsRightSpace ? " " : "") - + expression.sql(context, wrappedInParenthesis: true) + var resultSQL = try op.sql + expression.sql(context, wrappedInParenthesis: true) if wrappedInParenthesis { resultSQL = "(\(resultSQL))" } @@ -1072,13 +1408,11 @@ extension SQLExpression { case .countAll: return "COUNT(*)" - case let .function(name, aggregate: aggregate, distinct: distinct, arguments: arguments): - assert(!distinct || aggregate, "distinct requires aggregate") - assert(!distinct || arguments.count == 1, "distinct requires a single argument") - return try name - + (distinct ? 
"(DISTINCT " : "(") - + arguments.map { try $0.sql(context) }.joined(separator: ", ") - + ")" + case let .simpleFunction(invocation): + return try invocation.sql(context) + + case let .aggregateFunction(invocation): + return try invocation.sql(context, wrappedInParenthesis: wrappedInParenthesis) case let .isEmpty(expression, isNegated: isNegated): var resultSQL = try """ @@ -1487,17 +1821,6 @@ extension SQLExpression { } } - private static let knownPureFunctions = [ - "ABS", "CHAR", "COALESCE", "GLOB", "HEX", "IFNULL", - "IIF", "INSTR", "LENGTH", "LIKE", "LIKELIHOOD", - "LIKELY", "LOAD_EXTENSION", "LOWER", "LTRIM", - "NULLIF", "PRINTF", "QUOTE", "REPLACE", "ROUND", - "RTRIM", "SOUNDEX", "SQLITE_COMPILEOPTION_GET", - "SQLITE_COMPILEOPTION_USED", "SQLITE_SOURCE_ID", - "SQLITE_VERSION", "SUBSTR", "TRIM", "TRIM", - "TYPEOF", "UNICODE", "UNLIKELY", "UPPER", "ZEROBLOB", - ] - /// Returns true if the expression has a unique value when SQLite runs /// a request. /// @@ -1517,6 +1840,9 @@ extension SQLExpression { let .associativeBinary(_, expressions): return expressions.allSatisfy(\.isConstantInRequest) + case let .cast(expression, _): + return expression.isConstantInRequest + case let .between(expression: expression, lowerBound: lowerBound, upperBound: upperBound, isNegated: _): return expression.isConstantInRequest && lowerBound.isConstantInRequest @@ -1538,99 +1864,14 @@ extension SQLExpression { let .collated(expression, _): return expression.isConstantInRequest - case let .function(name, aggregate: false, distinct: false, arguments: arguments): - let name = name.uppercased() - guard ((name == "MAX" || name == "MIN") && arguments.count > 1) - || Self.knownPureFunctions.contains(name) - else { - return false // Don't know - assume not constant - } - - return arguments.allSatisfy(\.isConstantInRequest) + case let .simpleFunction(invocation): + return invocation.isConstantInRequest default: return false } } - /// Returns a qualified expression - func qualified(with alias: TableAlias) -> SQLExpression { - switch impl { - case .databaseValue, - .qualifiedColumn, - .qualifiedFastPrimaryKey, - .qualifiedExists, - .subquery, - .exists: - return self - - case let .column(name): - return .qualifiedColumn(name, alias) - - case let .rowValue(expressions): - assert(!expressions.isEmpty) - return .rowValue(expressions.map { $0.qualified(with: alias) })! 
- - case let .literal(sqlLiteral): - return .literal(sqlLiteral.qualified(with: alias)) - - case let .between(expression: expression, lowerBound: lowerBound, upperBound: upperBound, isNegated: isNegated): - return .between( - expression: expression.qualified(with: alias), - lowerBound: lowerBound.qualified(with: alias), - upperBound: upperBound.qualified(with: alias), - isNegated: isNegated) - - case let .binary(op, lhs, rhs): - return .binary(op, lhs.qualified(with: alias), rhs.qualified(with: alias)) - - case let .escapableBinary(op, lhs, rhs, escape): - return .escapableBinary( - op, - lhs.qualified(with: alias), - rhs.qualified(with: alias), - escape: escape?.qualified(with: alias)) - - case let .associativeBinary(op, expressions): - return .associativeBinary(op, expressions.map { $0.qualified(with: alias) }) - - case let .in(expression, collection, isNegated: isNegated): - return .in( - expression.qualified(with: alias), - collection.qualified(with: alias), - isNegated: isNegated - ) - - case let .unary(op, expression): - return .unary(op, expression.qualified(with: alias)) - - case let .compare(op, lhs, rhs): - return .compare(op, lhs.qualified(with: alias), rhs.qualified(with: alias)) - - case let .tableMatch(a, expression): - return .tableMatch(a, expression.qualified(with: alias)) - - case let .not(expression): - return .not(expression.qualified(with: alias)) - - case let .collated(expression, collationName): - return .collated(expression.qualified(with: alias), collationName) - - case .countAll: - return .countAll - - case let .function(name, aggregate: aggregate, distinct: distinct, arguments: arguments): - return SQLExpression(impl: .function(name, aggregate: aggregate, distinct: distinct, - arguments: arguments.map { $0.qualified(with: alias) })) - - case let .isEmpty(expression, isNegated: isNegated): - return .isEmpty(expression.qualified(with: alias), isNegated: isNegated) - - case .fastPrimaryKey: - return .qualifiedFastPrimaryKey(alias) - } - } - /// Returns true if the expression is an aggregate. /// /// When in doubt, returns false. @@ -1680,8 +1921,10 @@ extension SQLExpression { return false - case .countAll, - .function(_, aggregate: true, distinct: _, arguments: _): + case .countAll: + return true + + case .aggregateFunction: return true default: @@ -1690,17 +1933,203 @@ extension SQLExpression { } } +/// https://www.sqlite.org/syntax/simple-function-invocation.html +struct SQLSimpleFunctionInvocation { + var name: String + var arguments: [SQLExpression] + + /// A boolean value indicating if a function is known to be pure. + /// + /// A false value does not provide any information. + var isPure: Bool + + /// A boolean value indicating if a function is known to return a + /// JSON value. + /// + /// A false value does not provide any information. + var isJSONValue: Bool + + var isConstantInRequest: Bool { + isPure && arguments.allSatisfy(\.isConstantInRequest) + } + + func qualified(with alias: TableAlias) -> Self { + SQLSimpleFunctionInvocation( + name: name, + arguments: arguments.map { $0.qualified(with: alias) }, + isPure: isPure, + isJSONValue: isJSONValue) + } + + func column(_ db: Database, for alias: TableAlias) throws -> String? 
{ + let name = name.uppercased() + if ["HEX", "QUOTE"].contains(name) && arguments.count == 1 { + return try arguments[0].column(db, for: alias, acceptsBijection: true) + } else if name == "IFNULL" && arguments.count == 2 && arguments[1].isConstantInRequest { + return try arguments[0].column(db, for: alias, acceptsBijection: true) + } else { + return nil + } + } + + func sql(_ context: SQLGenerationContext) throws -> String { + var sql = name + sql += "(" + sql += try arguments + .map { try $0.sql(context) } + .joined(separator: ", ") + sql += ")" + return sql + } +} + +/// https://www.sqlite.org/syntax/aggregate-function-invocation.html +struct SQLAggregateFunctionInvocation { + var name: String + var arguments: [SQLExpression] + var isDistinct = false + var ordering: SQLOrdering? = nil // SQLite 3.44.0+ + var filter: SQLExpression? = nil // @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) SQLite 3.30+ + + /// A boolean value indicating if a function is known to return a + /// JSON value. + /// + /// A false value does not provide any information. + var isJSONValue: Bool + + func qualified(with alias: TableAlias) -> Self { + SQLAggregateFunctionInvocation( + name: name, + arguments: arguments.map { $0.qualified(with: alias) }, + isDistinct: isDistinct, + ordering: ordering?.qualified(with: alias), + filter: filter?.qualified(with: alias), + isJSONValue: isJSONValue) + } + + func sql(_ context: SQLGenerationContext, wrappedInParenthesis: Bool) throws -> String { + var sql = name + + if isDistinct { + sql += "(DISTINCT " + } else { + sql += "(" + } + + sql += try arguments + .map { try $0.sql(context) } + .joined(separator: ", ") + + if let ordering { + sql += try " ORDER BY \(ordering.sql(context))" + } + + sql += ")" + + if let filter { + sql += try " FILTER (WHERE \(filter.sql(context)))" + } + + if wrappedInParenthesis && filter != nil { + return "(\(sql))" + } else { + return sql + } + } +} + +// MARK: - JSON + +extension SQLExpression { + /// A boolean value indicating if the expression is known to be a + /// JSON value. + /// + /// A false value does not provide any information. + /// + /// For example: + /// + /// ```swift + /// // isJSONValue is true: + /// // + /// // NULL + /// // JSON('[1, 2, 3]') + /// // info -> 'address' + /// DatabaseValue.null + /// Database.json("[1, 2, 3]") + /// JSONColumn("info").jsonRepresentation(forKey: "address") + /// + /// // isJSONValue is false: + /// // + /// // '[1, 2, 3]' + /// // info + /// // info ->> 'address' + /// [1, 2, 3].databaseValue + /// JSONColumn("info") + /// JSONColumn("info")["address"] + /// ``` + var isJSONValue: Bool { + switch impl { + case .databaseValue(.null): + return true + + case let .binary(op, _, _): + return op.isJSONValue + + case let .collated(expression, _): + return expression.isJSONValue + + case let .simpleFunction(invocation): + return invocation.isJSONValue + + case let .aggregateFunction(invocation): + return invocation.isJSONValue + + default: + return false + } + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + /// Returns an expression suitable in JSON building contexts. + var jsonBuilderExpression: SQLExpression { + switch preferredJSONInterpretation { + case .deferredToSQLite: + return self + + case .jsonValue: + if isJSONValue { + return self + } else { + // Needs explicit call to JSON() + return .function("JSON", [self]) + } + } + } +#else + @available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) // SQLite 3.38+ with exceptions for macOS + /// Returns an expression suitable in JSON building contexts.
+ var jsonBuilderExpression: SQLExpression { + switch preferredJSONInterpretation { + case .deferredToSQLite: + return self + + case .jsonValue: + if isJSONValue { + return self + } else { + // Needs explicit call to JSON() + return .function("JSON", [self]) + } + } + } +#endif +} + // MARK: - SQLExpressible /// A type that can be used as an SQL expression. /// /// Related SQLite documentation -/// -/// ## Topics -/// -/// ### Supporting Type -/// -/// - ``SQLExpression`` public protocol SQLExpressible { /// Returns an SQL expression. var sqlExpression: SQLExpression { get } @@ -1725,11 +2154,6 @@ extension SQLExpressible where Self == Column { /// /// ## Topics /// -/// ### Column Expressions -/// -/// - ``Column`` -/// - ``ColumnExpression`` -/// /// ### Applying a Collation /// /// - ``collating(_:)-2mr78`` @@ -1737,8 +2161,11 @@ extension SQLExpressible where Self == Column { /// /// ### SQL Functions & Operators /// +/// See also JSON functions in . +/// /// - ``abs(_:)-5l6xp`` /// - ``average(_:)`` +/// - ``average(_:filter:)`` /// - ``capitalized`` /// - ``count(_:)`` /// - ``count(distinct:)`` @@ -1751,12 +2178,20 @@ extension SQLExpressible where Self == Column { /// - ``localizedUppercased`` /// - ``lowercased`` /// - ``min(_:)`` +/// - ``min(_:filter:)`` /// - ``max(_:)`` +/// - ``max(_:filter:)`` /// - ``sum(_:)`` +/// - ``sum(_:filter:)`` /// - ``total(_:)`` +/// - ``total(_:filter:)`` /// - ``uppercased`` /// - ``SQLDateModifier`` /// +/// ### Interpreting an expression as JSON +/// +/// - ``asJSON`` +/// /// ### Creating Ordering Terms /// /// - ``asc`` @@ -1893,13 +2328,13 @@ extension SQLSpecificExpressible { } #elseif !GRDBCIPHER /// An ordering term for ascending order (nulls last). - @available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) + @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ public var ascNullsLast: SQLOrdering { .ascNullsLast(sqlExpression) } /// An ordering term for descending order (nulls first). - @available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) + @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ public var descNullsFirst: SQLOrdering { .descNullsFirst(sqlExpression) } @@ -1936,7 +2371,7 @@ extension SQLSpecificExpressible { /// Returns an aliased column with the same name as the coding key. /// - /// See ``forKey(_:)-3xk0``. + /// See . public func forKey(_ key: some CodingKey) -> SQLSelection { forKey(key.stringValue) } diff --git a/GRDB/QueryInterface/SQL/SQLForeignKeyRequest.swift b/GRDB/QueryInterface/SQL/SQLForeignKeyRequest.swift index 75d4af2cd2..f1b38d7b53 100644 --- a/GRDB/QueryInterface/SQL/SQLForeignKeyRequest.swift +++ b/GRDB/QueryInterface/SQL/SQLForeignKeyRequest.swift @@ -22,7 +22,7 @@ struct SQLForeignKeyRequest { /// The (origin, destination) column pairs that join a left table to a right table. func fetchForeignKeyMapping(_ db: Database) throws -> ForeignKeyMapping { - if let originColumns = originColumns, let destinationColumns = destinationColumns { + if let originColumns, let destinationColumns { // Total information: no need to query the database schema. 
GRDBPrecondition(originColumns.count == destinationColumns.count, "Number of columns don't match") let mapping = zip(originColumns, destinationColumns).map { @@ -32,41 +32,76 @@ struct SQLForeignKeyRequest { } // Incomplete information: let's look for schema foreign keys - let foreignKeys = try db.foreignKeys(on: originTable).filter { foreignKey in - if destinationTable.lowercased() != foreignKey.destinationTable.lowercased() { - return false + // + // But maybe the tables are views. In this case, don't throw + // "no such table" error, because this is confusing for the user, + // as discovered in . + // Instead, we'll crash with a clear message. + + guard let originType = try tableType(db, for: originTable) else { + throw DatabaseError.noSuchTable(originTable) + } + + if originType.isView { + if originColumns == nil { + fatalError(""" + Could not infer foreign key from '\(originTable)' \ + to '\(destinationTable)'. To fix this error, provide an \ + explicit `ForeignKey` in the association definition. + """) } - if let originColumns = originColumns { - let originColumns = Set(originColumns.lazy.map { $0.lowercased() }) - let foreignKeyColumns = Set(foreignKey.mapping.lazy.map { $0.origin.lowercased() }) - if originColumns != foreignKeyColumns { + } else { + let foreignKeys = try db.foreignKeys(on: originTable).filter { foreignKey in + if destinationTable.lowercased() != foreignKey.destinationTable.lowercased() { return false } - } - if let destinationColumns = destinationColumns { - // TODO: test - let destinationColumns = Set(destinationColumns.lazy.map { $0.lowercased() }) - let foreignKeyColumns = Set(foreignKey.mapping.lazy.map { $0.destination.lowercased() }) - if destinationColumns != foreignKeyColumns { - return false + if let originColumns { + let originColumns = Set(originColumns.lazy.map { $0.lowercased() }) + let foreignKeyColumns = Set(foreignKey.mapping.lazy.map { $0.origin.lowercased() }) + if originColumns != foreignKeyColumns { + return false + } + } + if let destinationColumns { + // TODO: test + let destinationColumns = Set(destinationColumns.lazy.map { $0.lowercased() }) + let foreignKeyColumns = Set(foreignKey.mapping.lazy.map { $0.destination.lowercased() }) + if destinationColumns != foreignKeyColumns { + return false + } } + return true } - return true - } - - // Matching foreign key(s) found - if let foreignKey = foreignKeys.first { - if foreignKeys.count == 1 { - // Non-ambiguous - return foreignKey.mapping - } else { - // Ambiguous: can't choose - fatalError("Ambiguous foreign key from \(originTable) to \(destinationTable)") + + // Matching foreign key(s) found + if let foreignKey = foreignKeys.first { + if foreignKeys.count == 1 { + // Non-ambiguous + return foreignKey.mapping + } else { + // Ambiguous: can't choose + fatalError(""" + Ambiguous foreign key from '\(originTable)' to \ + '\(destinationTable)'. To fix this error, provide an \ + explicit `ForeignKey` in the association definition. + """) + } } } // No matching foreign key found: use the destination primary key - if let originColumns = originColumns { + if let originColumns { + guard let destinationType = try tableType(db, for: destinationTable) else { + throw DatabaseError.noSuchTable(destinationTable) + } + if destinationType.isView { + fatalError(""" + Could not infer foreign key from '\(originTable)' \ + to '\(destinationTable)'. To fix this error, provide an \ + explicit `ForeignKey` in the association definition, \ + with both origin and destination columns. 
+ """) + } let destinationColumns = try db.primaryKey(destinationTable).columns if originColumns.count == destinationColumns.count { let mapping = zip(originColumns, destinationColumns).map { @@ -76,7 +111,28 @@ struct SQLForeignKeyRequest { } } - fatalError("Could not infer foreign key from \(originTable) to \(destinationTable)") + fatalError(""" + Could not infer foreign key from '\(originTable)' to \ + '\(destinationTable)'. To fix this error, provide an \ + explicit `ForeignKey` in the association definition. + """) + } + + private struct TableType { + var isView: Bool + } + + private func tableType(_ db: Database, for name: String) throws -> TableType? { + for schemaID in try db.schemaIdentifiers() { + if try db.schema(schemaID).containsObjectNamed(name, ofType: .table) { + return TableType(isView: false) + } + if try db.schema(schemaID).containsObjectNamed(name, ofType: .view) { + return TableType(isView: true) + } + } + + return nil } } diff --git a/GRDB/QueryInterface/SQL/SQLFunctions.swift b/GRDB/QueryInterface/SQL/SQLFunctions.swift index 9d1796a2db..08a1a39ea8 100644 --- a/GRDB/QueryInterface/SQL/SQLFunctions.swift +++ b/GRDB/QueryInterface/SQL/SQLFunctions.swift @@ -10,6 +10,40 @@ public func abs(_ value: some SQLSpecificExpressible) -> SQLExpression { .function("ABS", [value.sqlExpression]) } +#if GRDBCUSTOMSQLITE || GRDBCIPHER +/// The `AVG` SQL function. +/// +/// For example: +/// +/// ```swift +/// // AVG(length) +/// average(Column("length")) +/// ``` +public func average( + _ value: some SQLSpecificExpressible, + filter: (any SQLSpecificExpressible)? = nil) +-> SQLExpression { + .aggregateFunction("AVG", [value.sqlExpression], filter: filter?.sqlExpression) +} +#else +/// The `AVG` SQL function. +/// +/// For example: +/// +/// ```swift +/// // AVG(length) FILTER (WHERE length > 0) +/// average(Column("length"), filter: Column("length") > 0) +/// ``` +@available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ +public func average( + _ value: some SQLSpecificExpressible, + filter: some SQLSpecificExpressible) +-> SQLExpression { + .aggregateFunction( + "AVG", [value.sqlExpression], + filter: filter.sqlExpression) +} + /// The `AVG` SQL function. /// /// For example: @@ -19,7 +53,22 @@ public func abs(_ value: some SQLSpecificExpressible) -> SQLExpression { /// average(Column("length")) /// ``` public func average(_ value: some SQLSpecificExpressible) -> SQLExpression { - .aggregate("AVG", [value.sqlExpression]) + .aggregateFunction("AVG", [value.sqlExpression]) +} +#endif + +/// The `CAST` SQL function. +/// +/// For example: +/// +/// ```swift +/// // CAST(value AS REAL) +/// cast(Column("value"), as: .real) +/// ``` +/// +/// Related SQLite documentation: +public func cast(_ expression: some SQLSpecificExpressible, as storageClass: Database.StorageClass) -> SQLExpression { + .cast(expression.sqlExpression, as: storageClass) } /// The `COUNT` SQL function. @@ -72,6 +121,38 @@ public func length(_ value: some SQLSpecificExpressible) -> SQLExpression { .function("LENGTH", [value.sqlExpression]) } +#if GRDBCUSTOMSQLITE || GRDBCIPHER +/// The `MAX` SQL function. +/// +/// For example: +/// +/// ```swift +/// // MAX(score) +/// max(Column("score")) +/// ``` +public func max( + _ value: some SQLSpecificExpressible, + filter: (any SQLSpecificExpressible)? = nil) +-> SQLExpression { + .aggregateFunction("MAX", [value.sqlExpression], filter: filter?.sqlExpression) +} +#else +/// The `MAX` SQL function. 
+/// +/// For example: +/// +/// ```swift +/// // MAX(score) FILTER (WHERE score < 0) +/// max(Column("score"), filter: Column("score") < 0) +/// ``` +@available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ +public func max( + _ value: some SQLSpecificExpressible, + filter: some SQLSpecificExpressible) +-> SQLExpression { + .aggregateFunction("MAX", [value.sqlExpression], filter: filter.sqlExpression) +} + /// The `MAX` SQL function. /// /// For example: @@ -81,7 +162,40 @@ public func length(_ value: some SQLSpecificExpressible) -> SQLExpression { /// max(Column("score")) /// ``` public func max(_ value: some SQLSpecificExpressible) -> SQLExpression { - .aggregate("MAX", [value.sqlExpression]) + .aggregateFunction("MAX", [value.sqlExpression]) +} +#endif + +#if GRDBCUSTOMSQLITE || GRDBCIPHER +/// The `MIN` SQL function. +/// +/// For example: +/// +/// ```swift +/// // MIN(score) +/// min(Column("score")) +/// ``` +public func min( + _ value: some SQLSpecificExpressible, + filter: (any SQLSpecificExpressible)? = nil) +-> SQLExpression { + .aggregateFunction("MIN", [value.sqlExpression], filter: filter?.sqlExpression) +} +#else +/// The `MIN` SQL function. +/// +/// For example: +/// +/// ```swift +/// // MIN(score) FILTER (WHERE score > 0) +/// min(Column("score"), filter: Column("score") > 0) +/// ``` +@available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ +public func min( + _ value: some SQLSpecificExpressible, + filter: some SQLSpecificExpressible) +-> SQLExpression { + .aggregateFunction("MIN", [value.sqlExpression], filter: filter.sqlExpression) } /// The `MIN` SQL function. @@ -93,7 +207,55 @@ public func max(_ value: some SQLSpecificExpressible) -> SQLExpression { /// min(Column("score")) /// ``` public func min(_ value: some SQLSpecificExpressible) -> SQLExpression { - .aggregate("MIN", [value.sqlExpression]) + .aggregateFunction("MIN", [value.sqlExpression]) +} +#endif + +#if GRDBCUSTOMSQLITE || GRDBCIPHER +/// The `SUM` SQL function. +/// +/// For example: +/// +/// ```swift +/// // SUM(amount) +/// sum(Column("amount")) +/// ``` +/// +/// See also ``total(_:)``. +/// +/// Related SQLite documentation: . +public func sum( + _ value: some SQLSpecificExpressible, + orderBy ordering: (any SQLOrderingTerm)? = nil, + filter: (any SQLSpecificExpressible)? = nil) +-> SQLExpression +{ + .aggregateFunction( + "SUM", [value.sqlExpression], + ordering: ordering?.sqlOrdering, + filter: filter?.sqlExpression) +} +#else +/// The `SUM` SQL function. +/// +/// For example: +/// +/// ```swift +/// // SUM(amount) FILTER (WHERE amount > 0) +/// sum(Column("amount"), filter: Column("amount") > 0) +/// ``` +/// +/// See also ``total(_:)``. +/// +/// Related SQLite documentation: . +@available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ +public func sum( + _ value: some SQLSpecificExpressible, + filter: some SQLSpecificExpressible) +-> SQLExpression { + .aggregateFunction( + "SUM", [value.sqlExpression], + filter: filter.sqlExpression) } /// The `SUM` SQL function. @@ -109,7 +271,55 @@ public func min(_ value: some SQLSpecificExpressible) -> SQLExpression { /// /// Related SQLite documentation: . public func sum(_ value: some SQLSpecificExpressible) -> SQLExpression { - .aggregate("SUM", [value.sqlExpression]) + .aggregateFunction("SUM", [value.sqlExpression]) +} +#endif + +#if GRDBCUSTOMSQLITE || GRDBCIPHER +/// The `TOTAL` SQL function. 
+/// +/// For example: +/// +/// ```swift +/// // TOTAL(amount) +/// total(Column("amount")) +/// ``` +/// +/// See also ``sum(_:)``. +/// +/// Related SQLite documentation: . +public func total( + _ value: some SQLSpecificExpressible, + orderBy ordering: (any SQLOrderingTerm)? = nil, + filter: (any SQLSpecificExpressible)? = nil) +-> SQLExpression +{ + .aggregateFunction( + "TOTAL", [value.sqlExpression], + ordering: ordering?.sqlOrdering, + filter: filter?.sqlExpression) +} +#else +/// The `TOTAL` SQL function. +/// +/// For example: +/// +/// ```swift +/// // TOTAL(amount) FILTER (WHERE amount > 0) +/// total(Column("amount"), filter: Column("amount") > 0) +/// ``` +/// +/// See also ``total(_:)``. +/// +/// Related SQLite documentation: . +@available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // SQLite 3.30+ +public func total( + _ value: some SQLSpecificExpressible, + filter: some SQLSpecificExpressible) +-> SQLExpression { + .aggregateFunction( + "TOTAL", [value.sqlExpression], + filter: filter.sqlExpression) } /// The `TOTAL` SQL function. @@ -125,8 +335,9 @@ public func sum(_ value: some SQLSpecificExpressible) -> SQLExpression { /// /// Related SQLite documentation: . public func total(_ value: some SQLSpecificExpressible) -> SQLExpression { - .aggregate("TOTAL", [value.sqlExpression]) + .aggregateFunction("TOTAL", [value.sqlExpression]) } +#endif // MARK: - String functions @@ -211,7 +422,7 @@ extension SQLSpecificExpressible { /// A date modifier for SQLite date functions. /// /// Related SQLite documentation: -public enum SQLDateModifier: SQLSpecificExpressible { +public enum SQLDateModifier: SQLSpecificExpressible, Sendable { /// Adds the specified amount of seconds case second(Double) diff --git a/GRDB/QueryInterface/SQL/SQLOperators.swift b/GRDB/QueryInterface/SQL/SQLOperators.swift index bf7759f991..993ee7bc5e 100644 --- a/GRDB/QueryInterface/SQL/SQLOperators.swift +++ b/GRDB/QueryInterface/SQL/SQLOperators.swift @@ -126,7 +126,7 @@ extension SQLSpecificExpressible { /// The `IS` SQL operator. public static func === (lhs: (any SQLExpressible)?, rhs: Self) -> SQLExpression { - if let lhs = lhs { + if let lhs { return .compare(.is, lhs.sqlExpression, rhs.sqlExpression) } else { return .compare(.is, rhs.sqlExpression, .null) @@ -436,6 +436,75 @@ extension SQLSpecificExpressible { } } +// MARK: - Bitwise Operators (&, |, ~, <<, >>) + +extension SQLSpecificExpressible { + /// The `&` SQL operator. + public static func & (lhs: Self, rhs: some SQLExpressible) -> SQLExpression { + .associativeBinary(.bitwiseAnd, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `&` SQL operator. + public static func & (lhs: some SQLExpressible, rhs: Self) -> SQLExpression { + .associativeBinary(.bitwiseAnd, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `&` SQL operator. + public static func & (lhs: Self, rhs: some SQLSpecificExpressible) -> SQLExpression { + .associativeBinary(.bitwiseAnd, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `|` SQL operator. + public static func | (lhs: Self, rhs: some SQLExpressible) -> SQLExpression { + .associativeBinary(.bitwiseOr, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `|` SQL operator. + public static func | (lhs: some SQLExpressible, rhs: Self) -> SQLExpression { + .associativeBinary(.bitwiseOr, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `|` SQL operator. 
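+    ///
+    /// For example (`mask` and `flags` are assumed column names):
+    ///
+    /// ```swift
+    /// // mask | flags
+    /// Column("mask") | Column("flags")
+    /// ```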
+ public static func | (lhs: Self, rhs: some SQLSpecificExpressible) -> SQLExpression { + .associativeBinary(.bitwiseOr, [lhs.sqlExpression, rhs.sqlExpression]) + } + + /// The `<<` SQL operator. + public static func << (lhs: Self, rhs: some SQLExpressible) -> SQLExpression { + .binary(.leftShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `<<` SQL operator. + public static func << (lhs: some SQLExpressible, rhs: Self) -> SQLExpression { + .binary(.leftShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `<<` SQL operator. + public static func << (lhs: Self, rhs: some SQLSpecificExpressible) -> SQLExpression { + .binary(.leftShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `>>` SQL operator. + public static func >> (lhs: Self, rhs: some SQLExpressible) -> SQLExpression { + .binary(.rightShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `>>` SQL operator. + public static func >> (lhs: some SQLExpressible, rhs: Self) -> SQLExpression { + .binary(.rightShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `>>` SQL operator. + public static func >> (lhs: Self, rhs: some SQLSpecificExpressible) -> SQLExpression { + .binary(.rightShift, lhs.sqlExpression, rhs.sqlExpression) + } + + /// The `~` SQL operator. + public static prefix func ~ (value: Self) -> SQLExpression { + .unary(.bitwiseNot, value.sqlExpression) + } +} + // MARK: - Like Operator extension SQLSpecificExpressible { diff --git a/GRDB/QueryInterface/SQL/SQLRelation.swift b/GRDB/QueryInterface/SQL/SQLRelation.swift index e4c687bf50..9f01841c2a 100644 --- a/GRDB/QueryInterface/SQL/SQLRelation.swift +++ b/GRDB/QueryInterface/SQL/SQLRelation.swift @@ -282,6 +282,28 @@ extension SQLRelation: Refinable { } } + func withStableOrder() -> Self { + with { relation in + relation.ordering = relation.ordering.appending(Ordering(orderings: { [relation] db in + if try db.tableExists(source.tableName) { + // Order by primary key. Don't order by rowid because those are + // not stable: rowids can change after a vacuum. + return try db.primaryKey(source.tableName).columns.map { SQLExpression.column($0).sqlOrdering } + } else { + // Support for views: create a stable order from all columns: + // ORDER BY 1, 2, 3, ... + let columnCount = try SQLQueryGenerator(relation: relation).columnCount(db) + return (1...columnCount).map { SQL(sql: $0.description).sqlOrdering } + } + })) + relation.children = children.mapValues { child in + child.with { + $0.relation = $0.relation.withStableOrder() + } + } + } + } + // Remove ordering iff relation has no LIMIT clause func unorderedUnlessLimited() -> Self { if limit != nil { @@ -530,11 +552,18 @@ extension SQLRelation { } } - func removingChildrenForPrefetchedAssociations() -> Self { - filteringChildren { - switch $0.kind { - case .all, .bridge: return false - case .oneRequired, .oneOptional: return true + /// Return a relation without any `.all` and `.bridge` children, recursively. + func removingPrefetchedAssociations() -> Self { + with { + $0.children = $0.children.compactMapValues { child in + switch child.kind { + case .all, .bridge: + return nil + case .oneRequired, .oneOptional: + return child.with { + $0.relation = $0.relation.removingPrefetchedAssociations() + } + } } } } @@ -605,7 +634,12 @@ extension SQLRelation { guard !isDistinct else { return try fetchTrivialCount(db) } - + + // + guard selection.allSatisfy(\.isTriviallyCountable) else { + return try fetchTrivialCount(db) + } + // SELECT expr1, expr2, ... FROM tableName ... 
// -> // SELECT COUNT(*) FROM tableName ... @@ -628,7 +662,7 @@ struct SQLLimit { let offset: Int? var sql: String { - if let offset = offset { + if let offset { return "\(limit) OFFSET \(offset)" } else { return "\(limit)" diff --git a/GRDB/QueryInterface/SQL/SQLSelection.swift b/GRDB/QueryInterface/SQL/SQLSelection.swift index aec58de09a..c4ff88045b 100644 --- a/GRDB/QueryInterface/SQL/SQLSelection.swift +++ b/GRDB/QueryInterface/SQL/SQLSelection.swift @@ -244,6 +244,18 @@ extension SQLSelection { return .literal(sqlLiteral.qualified(with: alias)) } } + + /// Supports SQLRelation.fetchCount. + /// + /// See + var isTriviallyCountable: Bool { + switch impl { + case .aliasedExpression, .literal: + return false + case .allColumns, .qualifiedAllColumns, .expression: + return true + } + } } extension [SQLSelection] { @@ -312,7 +324,7 @@ extension SQLSelection: SQLSelectable { /// let players = try Player.select(AllColumns()).fetchAll(db) /// } /// ``` -public struct AllColumns { +public struct AllColumns: Sendable { /// The `*` selection. public init() { } } diff --git a/GRDB/QueryInterface/SQL/Table.swift b/GRDB/QueryInterface/SQL/Table.swift index d6ec258be4..c6de851c74 100644 --- a/GRDB/QueryInterface/SQL/Table.swift +++ b/GRDB/QueryInterface/SQL/Table.swift @@ -138,7 +138,7 @@ /// ### Database Observation Support /// /// - ``databaseRegion(_:)`` -public struct Table { +public struct Table: Sendable { /// The table name. public var tableName: String @@ -298,10 +298,10 @@ extension Table { /// let maxScore: Int = row[1] /// } /// ``` - public func select( + public func select( _ selection: [any SQLSelectable], - as type: RowDecoder.Type = RowDecoder.self) - -> QueryInterfaceRequest + as type: T.Type = T.self) + -> QueryInterfaceRequest { all().select(selection, as: type) } @@ -328,10 +328,10 @@ extension Table { /// let maxScore: Int = row[1] /// } /// ``` - public func select( + public func select( _ selection: any SQLSelectable..., - as type: RowDecoder.Type = RowDecoder.self) - -> QueryInterfaceRequest + as type: T.Type = T.self) + -> QueryInterfaceRequest { all().select(selection, as: type) } @@ -353,11 +353,11 @@ extension Table { /// let request = playerTable.select(sql: "IFNULL(name, ?)", arguments: [defaultName], as: String.self) /// let names = try request.fetchAll(db) // [String] /// ``` - public func select( + public func select( sql: String, arguments: StatementArguments = StatementArguments(), - as type: RowDecoder.Type = RowDecoder.self) - -> QueryInterfaceRequest + as type: T.Type = T.self) + -> QueryInterfaceRequest { all().select(SQL(sql: sql, arguments: arguments), as: type) } @@ -376,10 +376,10 @@ extension Table { /// let request = playerTable.select(literal: "IFNULL(name, \(defaultName))", as: String.self) /// let names = try request.fetchAll(db) // [String] /// ``` - public func select( + public func select( literal sqlLiteral: SQL, - as type: RowDecoder.Type = RowDecoder.self) - -> QueryInterfaceRequest + as type: T.Type = T.self) + -> QueryInterfaceRequest { all().select(sqlLiteral, as: type) } @@ -723,7 +723,7 @@ extension Table { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Table where RowDecoder: Identifiable, RowDecoder.ID: DatabaseValueConvertible { /// Returns a request filtered by primary key. 
/// @@ -1346,7 +1346,7 @@ extension Table { { let association = HasManyThroughAssociation(through: pivot, using: target) - if let key = key { + if let key { return association.forKey(key) } else { return association @@ -1378,7 +1378,7 @@ extension Table { { let association = HasOneThroughAssociation(through: pivot, using: target) - if let key = key { + if let key { return association.forKey(key) } else { return association @@ -1546,7 +1546,7 @@ extension Table { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Table where RowDecoder: Identifiable, RowDecoder.ID: DatabaseValueConvertible @@ -1688,7 +1688,7 @@ extension Table { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Table where RowDecoder: Identifiable, RowDecoder.ID: DatabaseValueConvertible diff --git a/GRDB/QueryInterface/SQLGeneration/SQLColumnGenerator.swift b/GRDB/QueryInterface/SQLGeneration/SQLColumnGenerator.swift new file mode 100644 index 0000000000..d8754514c7 --- /dev/null +++ b/GRDB/QueryInterface/SQLGeneration/SQLColumnGenerator.swift @@ -0,0 +1,145 @@ +enum SQLColumnGenerator { + case columnDefinition(ColumnDefinition) + case columnLiteral(SQL) + + /// - parameter tableName: The name of the table that contains + /// the column. + /// - parameter primaryKeyColumns: A closure that returns the + /// primary key columns in the table that contains the column. If + /// the result is nil, the primary key is the hidden rowID. + func sql( + _ db: Database, + tableName: String, + primaryKeyColumns: () throws -> [SQLColumnDescriptor]?) + throws -> String + { + switch self { + case let .columnDefinition(column): + return try columnSQL( + db, column: column, + tableName: tableName, + primaryKeyColumns: primaryKeyColumns) + + case let .columnLiteral(sqlLiteral): + let context = SQLGenerationContext(db, argumentsSink: .literalValues) + return try sqlLiteral.sql(context) + } + } + + private func columnSQL( + _ db: Database, + column: ColumnDefinition, + tableName: String, + primaryKeyColumns: () throws -> [SQLColumnDescriptor]?) 
+ throws -> String + { + var chunks: [String] = [] + chunks.append(column.name.quotedDatabaseIdentifier) + + if let type = column.type { + chunks.append(type.rawValue) + } + + if let (conflictResolution, autoincrement) = column.primaryKey { + chunks.append("PRIMARY KEY") + if let conflictResolution { + chunks.append("ON CONFLICT") + chunks.append(conflictResolution.rawValue) + } + if autoincrement { + chunks.append("AUTOINCREMENT") + } + } + + switch column.notNullConflictResolution { + case .none: + break + case .abort: + chunks.append("NOT NULL") + case let conflictResolution?: + chunks.append("NOT NULL ON CONFLICT") + chunks.append(conflictResolution.rawValue) + } + + switch column.indexing { + case .none: + break + case .unique(let conflictResolution): + switch conflictResolution { + case .abort: + chunks.append("UNIQUE") + default: + chunks.append("UNIQUE ON CONFLICT") + chunks.append(conflictResolution.rawValue) + } + case .index: + break + } + + for checkConstraint in column.checkConstraints { + try chunks.append("CHECK (\(checkConstraint.quotedSQL(db)))") + } + + if let defaultExpression = column.defaultExpression { + try chunks.append("DEFAULT \(defaultExpression.quotedSQL(db))") + } + + if let collationName = column.collationName { + chunks.append("COLLATE") + chunks.append(collationName) + } + + for constraint in column.foreignKeyConstraints { + chunks.append("REFERENCES") + if let column = constraint.destinationColumn { + // explicit referenced column names + chunks.append(""" + \(constraint.destinationTable.quotedDatabaseIdentifier)\ + (\(column.quotedDatabaseIdentifier)) + """) + } else { + // implicit reference to primary key + let pkColumns: [String] + + if constraint.destinationTable.lowercased() == tableName.lowercased() { + // autoreference + let primaryKeyColumns = try primaryKeyColumns() ?? [.rowID] + pkColumns = primaryKeyColumns.map(\.name) + } else { + pkColumns = try db.primaryKey(constraint.destinationTable).columns + } + + chunks.append(""" + \(constraint.destinationTable.quotedDatabaseIdentifier)\ + (\(pkColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))) + """) + } + + if let deleteAction = constraint.deleteAction { + chunks.append("ON DELETE") + chunks.append(deleteAction.rawValue) + } + if let updateAction = constraint.updateAction { + chunks.append("ON UPDATE") + chunks.append(updateAction.rawValue) + } + if constraint.isDeferred { + chunks.append("DEFERRABLE INITIALLY DEFERRED") + } + } + + if let constraint = column.generatedColumnConstraint { + try chunks.append("GENERATED ALWAYS AS (\(constraint.expression.quotedSQL(db)))") + let qualificationLiteral: String + switch constraint.qualification { + case .stored: + qualificationLiteral = "STORED" + case .virtual: + qualificationLiteral = "VIRTUAL" + } + chunks.append(qualificationLiteral) + } + + return chunks.joined(separator: " ") + } +} diff --git a/GRDB/QueryInterface/SQLGeneration/SQLGenerationContext.swift b/GRDB/QueryInterface/SQLGeneration/SQLGenerationContext.swift index 80131277b9..d364a6bbba 100644 --- a/GRDB/QueryInterface/SQLGeneration/SQLGenerationContext.swift +++ b/GRDB/QueryInterface/SQLGeneration/SQLGenerationContext.swift @@ -172,15 +172,21 @@ class StatementArgumentsSink { private(set) var arguments: StatementArguments private let rawSQL: Bool - /// A sink which does not accept any arguments. - static let forRawSQL = StatementArgumentsSink(rawSQL: true) + /// A sink which turns all argument values into SQL literals. 
+ /// + /// The `"WHERE name = \("O'Brien")"` SQL literal is turned into the + /// `WHERE name = 'O''Brien'` SQL. + static let literalValues = StatementArgumentsSink(rawSQL: true) private init(rawSQL: Bool) { self.arguments = [] self.rawSQL = rawSQL } - /// A sink which accepts arguments + /// A sink which turns all argument values into `?` SQL parameters. + /// + /// The `"WHERE name = \("O'Brien")"` SQL literal is turned into the + /// `WHERE name = ?` SQL. convenience init() { self.init(rawSQL: false) } @@ -324,7 +330,7 @@ public class TableAlias { switch impl { case let .undefined(userName): - if let userName = userName { + if let userName { // rename assert(base.userName == nil || base.userName == userName) base.setUserName(userName) @@ -332,7 +338,7 @@ public class TableAlias { self.impl = .proxy(base) case let .table(tableName: tableName, userName: userName): assert(tableName == base.tableName) - if let userName = userName { + if let userName { // rename assert(base.userName == nil || base.userName == userName) base.setUserName(userName) @@ -358,7 +364,7 @@ public class TableAlias { // can't merge return nil } - if let userName = userName, let otherUserName = otherUserName, userName != otherUserName { + if let userName, let otherUserName, userName != otherUserName { // can't merge return nil } @@ -425,6 +431,15 @@ public class TableAlias { expression.sqlExpression.qualified(with: self) } + public subscript(_ expression: some SQLJSONExpressible & + SQLSpecificExpressible & + SQLSelectable & + SQLOrderingTerm) + -> AnySQLJSONExpressible + { + AnySQLJSONExpressible(sqlExpression: expression.sqlExpression.qualified(with: self)) + } + /// Returns an SQL ordering term that refers to the aliased table. /// /// For example, let's sort books by author name first, and then by title: diff --git a/GRDB/QueryInterface/SQLGeneration/SQLIndexGenerator.swift b/GRDB/QueryInterface/SQLGeneration/SQLIndexGenerator.swift new file mode 100644 index 0000000000..81e63a3de1 --- /dev/null +++ b/GRDB/QueryInterface/SQLGeneration/SQLIndexGenerator.swift @@ -0,0 +1,42 @@ +struct SQLIndexGenerator { + let name: String + let table: String + let expressions: [SQLExpression] + let options: IndexOptions + let condition: SQLExpression? 
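+
+    // Editor's sketch (not part of the upstream change): for a unique,
+    // partial index named "index_player_on_email" on player(email), the
+    // method below renders:
+    //
+    //     CREATE UNIQUE INDEX "index_player_on_email" ON "player"("email") WHERE "email" IS NOT NULL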
+ + func sql(_ db: Database) throws -> String { + var sql: SQL = "CREATE" + + if options.contains(.unique) { + sql += " UNIQUE" + } + + sql += " INDEX" + + if options.contains(.ifNotExists) { + sql += " IF NOT EXISTS" + } + + sql += " \(identifier: name) ON \(identifier: table)(" + sql += expressions.map { SQL($0) }.joined(separator: ", ") + sql += ")" + + if let condition { + sql += " WHERE \(condition)" + } + + let context = SQLGenerationContext(db, argumentsSink: .literalValues) + return try sql.sql(context) + } +} + +extension SQLIndexGenerator { + init(index: IndexDefinition) { + name = index.name + table = index.table + expressions = index.expressions + options = index.options + condition = index.condition + } +} diff --git a/GRDB/QueryInterface/SQLGeneration/SQLQueryGenerator.swift b/GRDB/QueryInterface/SQLGeneration/SQLQueryGenerator.swift index 9d393571b8..17551514bd 100644 --- a/GRDB/QueryInterface/SQLGeneration/SQLQueryGenerator.swift +++ b/GRDB/QueryInterface/SQLGeneration/SQLQueryGenerator.swift @@ -47,7 +47,7 @@ struct SQLQueryGenerator: Refinable { } let filter = try relation.filterPromise?.resolve(context.db) - if let filter = filter { + if let filter { sql += " WHERE " sql += try filter.sql(context) } @@ -83,7 +83,7 @@ struct SQLQueryGenerator: Refinable { limit = SQLLimit(limit: 1, offset: limit?.offset) } - if let limit = limit { + if let limit { sql += " LIMIT " sql += limit.sql } @@ -566,8 +566,8 @@ private struct SQLQualifiedRelation { /// The full selection, including selection of joined relations var selectionPromise: DatabasePromise<[SQLSelection]> { DatabasePromise { db in - let selection = try self.sourceSelectionPromise.resolve(db) - return try self.joins.values.reduce(into: selection) { selection, join in + let selection = try sourceSelectionPromise.resolve(db) + return try joins.values.reduce(into: selection) { selection, join in let joinedSelection = try join.relation.selectionPromise.resolve(db) selection.append(contentsOf: joinedSelection) } diff --git a/GRDB/QueryInterface/SQLGeneration/SQLTableAlterationGenerator.swift b/GRDB/QueryInterface/SQLGeneration/SQLTableAlterationGenerator.swift new file mode 100644 index 0000000000..030dfddb4e --- /dev/null +++ b/GRDB/QueryInterface/SQLGeneration/SQLTableAlterationGenerator.swift @@ -0,0 +1,84 @@ +struct SQLTableAlterationGenerator { + private enum TableAlterationKind { + case addColumn(SQLColumnGenerator) + case addIndex(SQLIndexGenerator) + case renameColumn(old: String, new: String) + case dropColumn(String) + } + + private var name: String + private var alterations: [TableAlterationKind] = [] + + func sql(_ db: Database) throws -> String { + var statements: [String] = [] + + for alteration in alterations { + switch alteration { + case let .addColumn(column): + var chunks: [String] = [] + chunks.append("ALTER TABLE") + chunks.append(name.quotedDatabaseIdentifier) + chunks.append("ADD COLUMN") + let sql = try column.sql(db, tableName: name, primaryKeyColumns: { + try db.primaryKey(name).columnInfos.map { columnInfos in + columnInfos.map { SQLColumnDescriptor($0) } + } + }) + chunks.append(sql) + let statement = chunks.joined(separator: " ") + statements.append(statement) + + case let .addIndex(index): + try statements.append(index.sql(db)) + + case let .renameColumn(oldName, newName): + var chunks: [String] = [] + chunks.append("ALTER TABLE") + chunks.append(name.quotedDatabaseIdentifier) + chunks.append("RENAME COLUMN") + chunks.append(oldName.quotedDatabaseIdentifier) + chunks.append("TO") + 
chunks.append(newName.quotedDatabaseIdentifier) + let statement = chunks.joined(separator: " ") + statements.append(statement) + + case let .dropColumn(column): + var chunks: [String] = [] + chunks.append("ALTER TABLE") + chunks.append(name.quotedDatabaseIdentifier) + chunks.append("DROP COLUMN") + chunks.append(column.quotedDatabaseIdentifier) + let statement = chunks.joined(separator: " ") + statements.append(statement) + } + } + + return statements.joined(separator: "; ") + } +} + +extension SQLTableAlterationGenerator { + init(_ tableAlteration: TableAlteration) { + self.name = tableAlteration.name + self.alterations = [] + + for alteration in tableAlteration.alterations { + switch alteration { + case let .add(column): + alterations.append(.addColumn(.columnDefinition(column))) + if let indexDefinition = column.indexDefinition(in: name) { + alterations.append(.addIndex(SQLIndexGenerator(index: indexDefinition))) + } + + case let .addColumnLiteral(sql): + alterations.append(.addColumn(.columnLiteral(sql))) + + case let .rename(old: oldName, new: newName): + alterations.append(.renameColumn(old: oldName, new: newName)) + + case let .drop(column): + alterations.append(.dropColumn(column)) + } + } + } +} diff --git a/GRDB/QueryInterface/SQLGeneration/SQLTableGenerator.swift b/GRDB/QueryInterface/SQLGeneration/SQLTableGenerator.swift new file mode 100644 index 0000000000..740b74dcce --- /dev/null +++ b/GRDB/QueryInterface/SQLGeneration/SQLTableGenerator.swift @@ -0,0 +1,476 @@ +struct SQLTableGenerator { + var name: String + var options: TableOptions + var columnGenerators: [SQLColumnGenerator] + /// Used for auto-referencing foreign keys: we need to know the columns + /// of the primary key before they exist in the database schema, hence + /// the name of "forward" primary key columns. + /// + /// If nil, the primary key is the hidden rowID. + var forwardPrimaryKeyColumns: [SQLColumnDescriptor]? + var primaryKeyConstraint: KeyConstraint? + var uniqueKeyConstraints: [KeyConstraint] + var foreignKeyConstraints: [SQLForeignKeyConstraint] + var checkConstraints: [SQLExpression] + var literalConstraints: [SQL] + var indexGenerators: [SQLIndexGenerator] + + struct KeyConstraint { + var columns: [String] + var conflictResolution: Database.ConflictResolution? 
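A sketch of the statement batch that `SQLTableAlterationGenerator` above assembles, driven through the public `alter(table:)` API from this diff (the SQL comments are indicative; column renaming requires SQLite 3.25+):

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("email", .text)
    }

    try db.alter(table: "player") { t in
        t.add(column: "score", .integer).notNull().defaults(to: 0).indexed()
        t.rename(column: "email", to: "contactEmail")
    }
    // Three statements, joined with "; ":
    // ALTER TABLE "player" ADD COLUMN "score" INTEGER NOT NULL DEFAULT 0;
    // CREATE INDEX "player_on_score" ON "player"("score");
    // ALTER TABLE "player" RENAME COLUMN "email" TO "contactEmail"
}
```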
+ } + + func sql(_ db: Database) throws -> String { + var statements: [String] = [] + + do { + var chunks: [String] = [] + chunks.append("CREATE") + if options.contains(.temporary) { + chunks.append("TEMPORARY") + } + chunks.append("TABLE") + if options.contains(.ifNotExists) { + chunks.append("IF NOT EXISTS") + } + chunks.append(name.quotedDatabaseIdentifier) + + do { + var items: [String] = [] + try items.append(contentsOf: columnGenerators.map { + try $0.sql(db, tableName: name, primaryKeyColumns: { forwardPrimaryKeyColumns }) + }) + + if let constraint = primaryKeyConstraint { + var chunks: [String] = [] + chunks.append("PRIMARY KEY") + chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") + if let conflictResolution = constraint.conflictResolution { + chunks.append("ON CONFLICT") + chunks.append(conflictResolution.rawValue) + } + items.append(chunks.joined(separator: " ")) + } + + for constraint in uniqueKeyConstraints { + var chunks: [String] = [] + chunks.append("UNIQUE") + chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") + if let conflictResolution = constraint.conflictResolution { + chunks.append("ON CONFLICT") + chunks.append(conflictResolution.rawValue) + } + items.append(chunks.joined(separator: " ")) + } + + for constraint in foreignKeyConstraints { + var chunks: [String] = [] + chunks.append("FOREIGN KEY") + chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") + chunks.append("REFERENCES") + if let destinationColumns = constraint.destinationColumns { + chunks.append(""" + \(constraint.destinationTable.quotedDatabaseIdentifier)(\ + \(destinationColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ + ) + """) + } else if constraint.destinationTable.lowercased() == name.lowercased() { + // autoreference + let forwardPrimaryKeyColumns = forwardPrimaryKeyColumns ?? 
[.rowID] + chunks.append(""" + \(constraint.destinationTable.quotedDatabaseIdentifier)(\ + \(forwardPrimaryKeyColumns.map(\.name.quotedDatabaseIdentifier).joined(separator: ", "))\ + ) + """) + } else { + let primaryKey = try db.primaryKey(constraint.destinationTable) + chunks.append(""" + \(constraint.destinationTable.quotedDatabaseIdentifier)(\ + \(primaryKey.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ + ) + """) + } + if let deleteAction = constraint.deleteAction { + chunks.append("ON DELETE") + chunks.append(deleteAction.rawValue) + } + if let updateAction = constraint.updateAction { + chunks.append("ON UPDATE") + chunks.append(updateAction.rawValue) + } + if constraint.isDeferred { + chunks.append("DEFERRABLE INITIALLY DEFERRED") + } + items.append(chunks.joined(separator: " ")) + } + + for checkExpression in checkConstraints { + var chunks: [String] = [] + try chunks.append("CHECK (\(checkExpression.quotedSQL(db)))") + items.append(chunks.joined(separator: " ")) + } + + for literal in literalConstraints { + let context = SQLGenerationContext(db, argumentsSink: .literalValues) + try items.append(literal.sql(context)) + } + + chunks.append("(\(items.joined(separator: ", ")))") + } + + var tableOptions: [String] = [] + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + if options.contains(.strict) { + tableOptions.append("STRICT") + } +#else + if #available(iOS 15.4, macOS 12.4, tvOS 15.4, watchOS 8.5, *) { // SQLite 3.37+ + if options.contains(.strict) { + tableOptions.append("STRICT") + } + } +#endif + if options.contains(.withoutRowID) { + tableOptions.append("WITHOUT ROWID") + } + + if !tableOptions.isEmpty { + chunks.append(tableOptions.joined(separator: ", ")) + } + + statements.append(chunks.joined(separator: " ")) + } + + let indexStatements = try indexGenerators.map { try $0.sql(db) } + statements.append(contentsOf: indexStatements) + return statements.joined(separator: "; ") + } + + private struct ForeignKeyGenerator { + var columnNames: [String] + var columnGenerators: [SQLColumnGenerator] + var foreignKeyConstraint: SQLForeignKeyConstraint? + var indexGenerator: SQLIndexGenerator? + } +} + +extension SQLTableGenerator { + init(_ db: Database, table: TableDefinition) throws { + var indexOptions: IndexOptions = [] + if table.options.contains(.ifNotExists) { indexOptions.insert(.ifNotExists) } + + func makeKeyConstraint( + _ db: Database, + constraint: TableDefinition.KeyConstraint, + forwardPrimaryKey: SQLPrimaryKeyDescriptor) + throws -> SQLTableGenerator.KeyConstraint + { + try SQLTableGenerator.KeyConstraint( + columns: constraint.components.flatMap { component -> [String] in + switch component { + case let .columnName(columnName): + return [columnName] + case let .columnDefinition(column): + return [column.name] + case let .foreignKeyDefinition(foreignKey): + return try Self.makeForeignKeyGenerator( + db, foreignKey: foreignKey, + originTable: table.name, + forwardPrimaryKey: forwardPrimaryKey, + indexOptions: indexOptions).columnNames + } + }, + conflictResolution: constraint.conflictResolution) + } + + var forwardPrimaryKeyColumns: [SQLColumnDescriptor]? 
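The availability check above exists because the system SQLite gained `STRICT` tables in 3.37. A sketch of the corresponding public API, assuming `TableOptions` exposes `.strict` as in current GRDB (on older systems the option is silently dropped, per the code above):

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    try db.create(table: "reading", options: [.ifNotExists, .strict]) { t in
        t.primaryKey("sensorId", .text)
        t.column("value", .integer).notNull()
    }
    // CREATE TABLE IF NOT EXISTS "reading"
    // ("sensorId" TEXT NOT NULL PRIMARY KEY, "value" INTEGER NOT NULL) STRICT
}
```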
+ if let primaryKeyConstraint = table.primaryKeyConstraint { + forwardPrimaryKeyColumns = try Self.forwardPrimaryKeyColumns( + db, primaryKeyConstraint: primaryKeyConstraint, + originTable: table.name) + } else { + for component in table.columnComponents { + if case let .columnDefinition(column) = component, column.primaryKey != nil { + forwardPrimaryKeyColumns = [SQLColumnDescriptor(column)] + break + } + } + } + let forwardPrimaryKey = SQLPrimaryKeyDescriptor( + tableName: table.name, + primaryKeyColumns: forwardPrimaryKeyColumns) + + var columnGenerators: [SQLColumnGenerator] = [] + var foreignKeyConstraints: [SQLForeignKeyConstraint] = [] + var indexGenerators: [SQLIndexGenerator] = [] + + for component in table.columnComponents { + switch component { + case let .columnDefinition(column): + columnGenerators.append(.columnDefinition(column)) + + case let .columnLiteral(sql): + columnGenerators.append(.columnLiteral(sql)) + + case let .foreignKeyDefinition(foreignKey): + let fkGenerator = try Self.makeForeignKeyGenerator( + db, foreignKey: foreignKey, + originTable: table.name, + forwardPrimaryKey: forwardPrimaryKey, + indexOptions: indexOptions) + columnGenerators.append(contentsOf: fkGenerator.columnGenerators) + if let indexGenerator = fkGenerator.indexGenerator { + indexGenerators.append(indexGenerator) + } + if let foreignKeyConstraint = fkGenerator.foreignKeyConstraint { + foreignKeyConstraints.append(foreignKeyConstraint) + } + + case let .foreignKeyConstraint(constraint): + foreignKeyConstraints.append(constraint) + } + } + + for columnGenerator in columnGenerators { + if case let .columnDefinition(column) = columnGenerator, + let index = column.indexDefinition(in: table.name, options: indexOptions) + { + indexGenerators.append(SQLIndexGenerator(index: index)) + } + } + + try self.init( + name: table.name, + options: table.options, + columnGenerators: columnGenerators, + forwardPrimaryKeyColumns: forwardPrimaryKeyColumns, + primaryKeyConstraint: table.primaryKeyConstraint.map { + try makeKeyConstraint(db, constraint: $0, forwardPrimaryKey: forwardPrimaryKey) + }, + uniqueKeyConstraints: table.uniqueKeyConstraints.map { + try makeKeyConstraint(db, constraint: $0, forwardPrimaryKey: forwardPrimaryKey) + }, + foreignKeyConstraints: foreignKeyConstraints, + checkConstraints: table.checkConstraints, + literalConstraints: table.literalConstraints, + indexGenerators: indexGenerators) + } + + private static func forwardPrimaryKeyColumns( + _ db: Database, + primaryKeyConstraint: TableDefinition.KeyConstraint, + originTable: String) + throws -> [SQLColumnDescriptor]? 
+ { + var forwardPrimaryKeyColumns: [SQLColumnDescriptor] = [] + for component in primaryKeyConstraint.components { + switch component { + case let .columnDefinition(column): + forwardPrimaryKeyColumns.append(SQLColumnDescriptor(column)) + case let .foreignKeyDefinition(foreignKey): + let fkGenerator = try makeForeignKeyGenerator( + db, foreignKey: foreignKey, + originTable: originTable, + forwardPrimaryKey: nil, // not known yet, since we're building it + indexOptions: []) + for columnGenerator in fkGenerator.columnGenerators { + switch columnGenerator { + case let .columnDefinition(column): + forwardPrimaryKeyColumns.append(SQLColumnDescriptor(column)) + case .columnLiteral: + // Unknown column name + return nil + } + } + case let .columnName(name): + forwardPrimaryKeyColumns.append(SQLColumnDescriptor(name: name, type: nil)) + } + } + return forwardPrimaryKeyColumns + } + + private static func makeForeignKeyGenerator( + _ db: Database, + foreignKey: ForeignKeyDefinition, + originTable: String, + forwardPrimaryKey: SQLPrimaryKeyDescriptor?, + indexOptions: IndexOptions) + throws -> ForeignKeyGenerator + { + let destinationPrimaryKey: SQLPrimaryKeyDescriptor + + if let table = foreignKey.table { + if let forwardPrimaryKey, + originTable.lowercased() == table.lowercased() + { + // autoreference + destinationPrimaryKey = forwardPrimaryKey + } else { + destinationPrimaryKey = try foreignKey.primaryKey(db) + } + } else { + if let forwardPrimaryKey, + originTable.singularized.lowercased() == foreignKey.name.singularized.lowercased() + { + // autoreference + destinationPrimaryKey = forwardPrimaryKey + } else { + destinationPrimaryKey = try foreignKey.primaryKey(db) + } + } + + guard let primaryKeyColumns = destinationPrimaryKey.primaryKeyColumns else { + // Destination table has an hidden rowID primary key + let columnName = foreignKey.name + "Id" + let column = ColumnDefinition(name: columnName, type: .integer).references( + destinationPrimaryKey.tableName, + onDelete: foreignKey.deleteAction, + onUpdate: foreignKey.updateAction, + deferred: foreignKey.isDeferred) + if let notNullConflictResolution = foreignKey.notNullConflictResolution { + column.notNull(onConflict: notNullConflictResolution) + } + switch foreignKey.indexing { + case nil: + break + case .index: + column.indexed() + case .unique: + column.unique() + } + return ForeignKeyGenerator( + columnNames: [columnName], + columnGenerators: [SQLColumnGenerator.columnDefinition(column)], + foreignKeyConstraint: nil, + indexGenerator: nil) + } + + assert(!primaryKeyColumns.isEmpty) + let columnNames = primaryKeyColumns.map { + foreignKey.name + $0.name.uppercasingFirstCharacter + } + + if primaryKeyColumns.count == 1 { + // Destination table has a single column primary key + let pkColumn = primaryKeyColumns[0] + let columnName = columnNames[0] + let column = ColumnDefinition(name: columnName, type: pkColumn.type).references( + destinationPrimaryKey.tableName, + column: pkColumn.name, + onDelete: foreignKey.deleteAction, + onUpdate: foreignKey.updateAction, + deferred: foreignKey.isDeferred) + + if let notNullConflictResolution = foreignKey.notNullConflictResolution { + column.notNull(onConflict: notNullConflictResolution) + } + + switch foreignKey.indexing { + case nil: + break + case .index: + column.indexed() + case .unique: + column.unique() + } + + return ForeignKeyGenerator( + columnNames: [columnName], + columnGenerators: [SQLColumnGenerator.columnDefinition(column)], + foreignKeyConstraint: nil, + indexGenerator: nil) + } else { + // 
Destination table has a composite primary key + let columnGenerators = zip(primaryKeyColumns, columnNames).map { pkColumn, columnName in + let column = ColumnDefinition(name: columnName, type: pkColumn.type) + if let notNullConflictResolution = foreignKey.notNullConflictResolution { + column.notNull(onConflict: notNullConflictResolution) + } + return SQLColumnGenerator.columnDefinition(column) + } + + let foreignKeyConstraint = SQLForeignKeyConstraint( + columns: columnNames, + destinationTable: destinationPrimaryKey.tableName, + destinationColumns: nil, + deleteAction: foreignKey.deleteAction, + updateAction: foreignKey.updateAction, + isDeferred: foreignKey.isDeferred) + + let indexGenerator: SQLIndexGenerator? + switch foreignKey.indexing { + case nil: + indexGenerator = nil + case .index: + indexGenerator = SQLIndexGenerator( + name: Database.defaultIndexName(on: originTable, columns: columnNames), + table: originTable, + expressions: columnNames.map { .column($0) }, + options: indexOptions, + condition: nil) + case .unique: + indexGenerator = SQLIndexGenerator( + name: Database.defaultIndexName(on: originTable, columns: columnNames), + table: originTable, + expressions: columnNames.map { .column($0) }, + options: indexOptions.union([.unique]), + condition: nil) + } + + return ForeignKeyGenerator( + columnNames: columnNames, + columnGenerators: columnGenerators, + foreignKeyConstraint: foreignKeyConstraint, + indexGenerator: indexGenerator) + } + } +} + +struct SQLColumnDescriptor { + static let rowID = SQLColumnDescriptor(name: Column.rowID.name, type: .integer) + + var name: String + var type: Database.ColumnType? +} + +extension SQLColumnDescriptor { + init(_ column: ColumnInfo) { + self.init( + name: column.name, + type: column.columnType) + } + + init(_ column: ColumnDefinition) { + self.init(name: column.name, type: column.type) + } +} + +struct SQLForeignKeyConstraint { + var columns: [String] + var destinationTable: String + var destinationColumns: [String]? + var deleteAction: Database.ForeignKeyAction? + var updateAction: Database.ForeignKeyAction? + var isDeferred: Bool +} + +struct SQLPrimaryKeyDescriptor { + /// The name of the forward-declared table + var tableName: String + + /// If nil, the primary key is the hidden rowID. + var primaryKeyColumns: [SQLColumnDescriptor]? +} + +extension SQLPrimaryKeyDescriptor { + static func find(_ db: Database, table: String) throws -> Self { + let columnInfos = try db.primaryKey(table).columnInfos + return SQLPrimaryKeyDescriptor( + tableName: table, + primaryKeyColumns: columnInfos.map { columnInfos in + columnInfos.map { SQLColumnDescriptor($0) } + }) + + } +} diff --git a/GRDB/QueryInterface/SQLInterpolation+QueryInterface.swift b/GRDB/QueryInterface/SQLInterpolation+QueryInterface.swift index 0cfca9a219..eb161ab8bd 100644 --- a/GRDB/QueryInterface/SQLInterpolation+QueryInterface.swift +++ b/GRDB/QueryInterface/SQLInterpolation+QueryInterface.swift @@ -19,6 +19,16 @@ extension SQLInterpolation { appendLiteral(table.databaseTableName.quotedDatabaseIdentifier) } + /// Appends the table name. + /// + /// // SELECT * FROM player + /// let playerTable = Table("player") + /// let request: SQLRequest = "SELECT * FROM \(playerTable)" + @_disfavoredOverload + public mutating func appendInterpolation(_ table: Table) { + appendLiteral(table.tableName.quotedDatabaseIdentifier) + } + /// Appends the table name of the record. /// /// // INSERT INTO player ... 
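Putting `makeForeignKeyGenerator` together, here is a sketch of two of the destination cases it handles, driven through the `belongsTo` API documented in this diff (table names are illustrative and the SQL comments indicative):

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    // Destination with a hidden rowid primary key: a single
    // "<name>Id" INTEGER column is generated.
    try db.create(table: "team") { t in
        t.column("name", .text)
    }
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.belongsTo("team")
        // teamId INTEGER REFERENCES team(rowid), plus an index
    }

    // Destination with a composite primary key: one column per
    // primary key column, plus a table-level FOREIGN KEY constraint.
    try db.create(table: "citizenship") { t in
        t.primaryKey {
            t.column("countryCode", .text)
            t.column("citizenId", .integer)
        }
    }
    try db.create(table: "passport") { t in
        t.autoIncrementedPrimaryKey("id")
        t.belongsTo("citizenship")
        // citizenshipCountryCode TEXT, citizenshipCitizenId INTEGER,
        // FOREIGN KEY (citizenshipCountryCode, citizenshipCitizenId)
        //   REFERENCES citizenship(countryCode, citizenId), plus an index
    }
}
```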
@@ -28,6 +38,15 @@ extension SQLInterpolation { appendInterpolation(type(of: record)) } + /// Appends a quoted identifier. + /// + /// // INSERT INTO "group" ... + /// let tableName = "group" + /// let request: SQLRequest = "INSERT INTO \(identifier: tableName) ..." + public mutating func appendInterpolation(identifier: String) { + appendLiteral(identifier.quotedDatabaseIdentifier) + } + /// Appends the table name of the record. /// /// // INSERT INTO player ... @@ -74,7 +93,7 @@ extension SQLInterpolation { /// """ @_disfavoredOverload public mutating func appendInterpolation(_ selection: (any SQLSelectable)?) { - if let selection = selection { + if let selection { elements.append(.selection(selection.sqlSelection)) } else { appendLiteral("NULL") @@ -127,7 +146,7 @@ extension SQLInterpolation { /// """ @_disfavoredOverload public mutating func appendInterpolation(_ expressible: (any SQLExpressible)?) { - if let expressible = expressible { + if let expressible { elements.append(.expression(expressible.sqlExpression)) } else { appendLiteral("NULL") diff --git a/GRDB/QueryInterface/Schema/ColumnDefinition.swift b/GRDB/QueryInterface/Schema/ColumnDefinition.swift new file mode 100644 index 0000000000..88cf0103a3 --- /dev/null +++ b/GRDB/QueryInterface/Schema/ColumnDefinition.swift @@ -0,0 +1,572 @@ +/// Describes a database column. +/// +/// You get instances of `ColumnDefinition` when you create or alter a database +/// tables. For example: +/// +/// ```swift +/// try db.create(table: "player") { t in +/// t.column("name", .text) // ColumnDefinition +/// } +/// +/// try db.alter(table: "player") { t in +/// t.add(column: "score", .integer) // ColumnDefinition +/// } +/// ``` +/// +/// See ``TableDefinition/column(_:_:)`` and ``TableAlteration/add(column:_:)``. +/// +/// Related SQLite documentation: +/// +/// - +/// - +/// +/// ## Topics +/// +/// ### Foreign Keys +/// +/// - ``references(_:column:onDelete:onUpdate:deferred:)`` +/// +/// ### Indexes +/// +/// - ``indexed()`` +/// - ``unique(onConflict:)`` +/// +/// ### Default value +/// +/// - ``defaults(to:)`` +/// - ``defaults(sql:)`` +/// +/// ### Collations +/// +/// - ``collate(_:)-4dljx`` +/// - ``collate(_:)-9ywza`` +/// +/// ### Generated Columns +/// +/// - ``generatedAs(_:_:)`` +/// - ``generatedAs(sql:_:)`` +/// - ``GeneratedColumnQualification`` +/// +/// ### Other Constraints +/// +/// - ``check(_:)`` +/// - ``check(sql:)`` +/// - ``notNull(onConflict:)`` +/// +/// ### Sunsetted Methods +/// +/// Those are legacy interfaces that are preserved for backwards compatibility. +/// Their use is not recommended. +/// +/// - ``primaryKey(onConflict:autoincrement:)`` +public final class ColumnDefinition { + enum Indexing { + case index + case unique(Database.ConflictResolution) + } + + struct ForeignKeyConstraint { + var destinationTable: String + var destinationColumn: String? + var deleteAction: Database.ForeignKeyAction? + var updateAction: Database.ForeignKeyAction? + var isDeferred: Bool + } + + /// The kind of a generated column. + /// + /// Related SQLite documentation: + public enum GeneratedColumnQualification: Sendable { + /// A `VIRTUAL` generated column. + case virtual + /// A `STORED` generated column. + case stored + } + + struct GeneratedColumnConstraint { + var expression: SQLExpression + var qualification: GeneratedColumnQualification + } + + let name: String + let type: Database.ColumnType? + var primaryKey: (conflictResolution: Database.ConflictResolution?, autoincrement: Bool)? + var indexing: Indexing? 
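A usage sketch for the `\(identifier:)` interpolation added above, which is handy when a table name collides with an SQL keyword:

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    try db.create(table: "group") { t in
        t.autoIncrementedPrimaryKey("id")
    }

    // "group" is a reserved word; \(identifier:) quotes it safely:
    let tableName = "group"
    let request: SQLRequest<Row> = "SELECT * FROM \(identifier: tableName)"
    // SELECT * FROM "group"
    let rows = try request.fetchAll(db)
    print(rows.count) // 0
}
```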
+    var notNullConflictResolution: Database.ConflictResolution?
+    var checkConstraints: [SQLExpression] = []
+    var foreignKeyConstraints: [ForeignKeyConstraint] = []
+    var defaultExpression: SQLExpression?
+    var collationName: String?
+    var generatedColumnConstraint: GeneratedColumnConstraint?
+
+    init(name: String, type: Database.ColumnType?) {
+        self.name = name
+        self.type = type
+    }
+
+    /// Adds a primary key constraint.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // CREATE TABLE player(
+    /// //   id TEXT NOT NULL PRIMARY KEY
+    /// // )
+    /// try db.create(table: "player") { t in
+    ///     t.primaryKey("id", .text)
+    /// }
+    /// ```
+    ///
+    /// - important: Make sure you add a not null constraint on your primary key
+    ///   column, as in the above example, or SQLite will allow null values.
+    ///   See <https://www.sqlite.org/quirks.html#primary_keys_can_sometimes_contain_nulls>
+    ///   for more information.
+    ///
+    /// - warning: This is a legacy interface that is preserved for backwards
+    ///   compatibility. Use of this interface is not recommended: prefer
+    ///   ``TableDefinition/primaryKey(_:_:onConflict:)``
+    ///   instead.
+    ///
+    /// - parameters:
+    ///     - conflictResolution: An optional ``Database/ConflictResolution``.
+    ///     - autoincrement: If true, the primary key is autoincremented.
+    /// - returns: `self` so that you can further refine the column definition.
+    @discardableResult
+    public func primaryKey(
+        onConflict conflictResolution: Database.ConflictResolution? = nil,
+        autoincrement: Bool = false)
+    -> Self
+    {
+        primaryKey = (conflictResolution: conflictResolution, autoincrement: autoincrement)
+        return self
+    }
+
+    /// Adds a not null constraint.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // CREATE TABLE player(
+    /// //   name TEXT NOT NULL
+    /// // )
+    /// try db.create(table: "player") { t in
+    ///     t.column("name", .text).notNull()
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation: <https://www.sqlite.org/lang_createtable.html#notnullconst>
+    ///
+    /// - parameter conflictResolution: An optional ``Database/ConflictResolution``.
+    /// - returns: `self` so that you can further refine the column definition.
+    @discardableResult
+    public func notNull(onConflict conflictResolution: Database.ConflictResolution? = nil) -> Self {
+        notNullConflictResolution = conflictResolution ?? .abort
+        return self
+    }
+
+    /// Adds a unique constraint.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // CREATE TABLE player(
+    /// //   email TEXT UNIQUE
+    /// // )
+    /// try db.create(table: "player") { t in
+    ///     t.column("email", .text).unique()
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation: <https://www.sqlite.org/lang_createtable.html#uniqueconst>
+    ///
+    /// - parameter conflictResolution: An optional ``Database/ConflictResolution``.
+    /// - returns: `self` so that you can further refine the column definition.
+    @discardableResult
+    public func unique(onConflict conflictResolution: Database.ConflictResolution? = nil) -> Self {
+        indexing = .unique(conflictResolution ?? .abort)
+        return self
+    }
+
+    /// Adds an index.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // CREATE TABLE player(email TEXT);
+    /// // CREATE INDEX player_on_email ON player(email);
+    /// try db.create(table: "player") { t in
+    ///     t.column("email", .text).indexed()
+    /// }
+    /// ```
+    ///
+    /// The name of the created index is `<table>_on_<column>`, where `table`
+    /// and `column` are the names of the table and the column. See the
+    /// example above.
+    ///
+    /// If the column already has a unique constraint from ``unique(onConflict:)``,
+    /// this method has no effect: the unique constraint already provides
+    /// an index.
+    ///
+    /// See also ``unique(onConflict:)``.
+    ///
+    /// - returns: `self` so that you can further refine the column definition.
+ @discardableResult + public func indexed() -> Self { + if case .none = indexing { + self.indexing = .index + } + return self + } + + /// Adds a check constraint. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // name TEXT CHECK (LENGTH(name) > 0) + /// // ) + /// try db.create(table: "player") { t in + /// t.column("name", .text).check { length($0) > 0 } + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter condition: A closure whose argument is a ``Column`` that + /// represents the defined column, and returns the expression to check. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func check(_ condition: (Column) -> any SQLExpressible) -> Self { + checkConstraints.append(condition(Column(name)).sqlExpression) + return self + } + + /// Adds a check constraint. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // name TEXT CHECK (LENGTH(name) > 0) + /// // ) + /// try db.create(table: "player") { t in + /// t.column("name", .text).check(sql: "LENGTH(name) > 0") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter sql: An SQL snippet. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func check(sql: String) -> Self { + checkConstraints.append(SQL(sql: sql).sqlExpression) + return self + } + + /// Defines the default value. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // email TEXT DEFAULT 'Anonymous' + /// // ) + /// try db.create(table: "player") { t in + /// t.column("name", .text).defaults(to: "Anonymous") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter value: A ``DatabaseValueConvertible`` value. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func defaults(to value: some DatabaseValueConvertible) -> Self { + defaultExpression = value.sqlExpression + return self + } + + /// Defines the default value. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // creationDate DATETIME DEFAULT CURRENT_TIMESTAMP + /// // ) + /// try db.create(table: "player") { t in + /// t.column("creationDate", .DateTime).defaults(sql: "CURRENT_TIMESTAMP") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter sql: An SQL snippet. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func defaults(sql: String) -> Self { + defaultExpression = SQL(sql: sql).sqlExpression + return self + } + + /// Defines the default collation. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // email TEXT COLLATE NOCASE + /// // ) + /// try db.create(table: "player") { t in + /// t.column("email", .text).collate(.nocase) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter collation: A ``Database/CollationName``. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func collate(_ collation: Database.CollationName) -> Self { + collationName = collation.rawValue + return self + } + + /// Defines the default collation. 
+ /// + /// For example: + /// + /// ```swift + /// try db.create(table: "player") { t in + /// t.column("name", .text).collate(.localizedCaseInsensitiveCompare) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter collation: A ``DatabaseCollation``. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func collate(_ collation: DatabaseCollation) -> Self { + collationName = collation.name + return self + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + /// Defines the column as a generated column. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // score INTEGER NOT NULL, + /// // bonus INTEGER NOT NULL, + /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // ) + /// try db.create(table: "player") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("score", .integer).notNull() + /// t.column("bonus", .integer).notNull() + /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - sql: An SQL expression. + /// - qualification: The generated column's qualification, which + /// defaults to ``GeneratedColumnQualification/virtual``. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func generatedAs( + sql: String, + _ qualification: GeneratedColumnQualification = .virtual) + -> Self + { + let expression = SQL(sql: sql).sqlExpression + generatedColumnConstraint = GeneratedColumnConstraint( + expression: expression, + qualification: qualification) + return self + } + + /// Defines the column as a generated column. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // score INTEGER NOT NULL, + /// // bonus INTEGER NOT NULL, + /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // ) + /// try db.create(table: "player") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("score", .integer).notNull() + /// t.column("bonus", .integer).notNull() + /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - expression: The generated expression. + /// - qualification: The generated column's qualification, which + /// defaults to ``GeneratedColumnQualification/virtual``. + /// - returns: `self` so that you can further refine the column definition. + @discardableResult + public func generatedAs( + _ expression: some SQLExpressible, + _ qualification: GeneratedColumnQualification = .virtual) + -> Self + { + generatedColumnConstraint = GeneratedColumnConstraint( + expression: expression.sqlExpression, + qualification: qualification) + return self + } +#else + /// Defines the column as a generated column. 
+ /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // score INTEGER NOT NULL, + /// // bonus INTEGER NOT NULL, + /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // ) + /// try db.create(table: "player") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("score", .integer).notNull() + /// t.column("bonus", .integer).notNull() + /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - sql: An SQL expression. + /// - qualification: The generated column's qualification, which + /// defaults to ``GeneratedColumnQualification/virtual``. + /// - returns: `self` so that you can further refine the column definition. + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ (3.31 actually) + @discardableResult + public func generatedAs( + sql: String, + _ qualification: GeneratedColumnQualification = .virtual) + -> Self + { + let expression = SQL(sql: sql).sqlExpression + generatedColumnConstraint = GeneratedColumnConstraint( + expression: expression, + qualification: qualification) + return self + } + + /// Defines the column as a generated column. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // score INTEGER NOT NULL, + /// // bonus INTEGER NOT NULL, + /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // ) + /// try db.create(table: "player") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("score", .integer).notNull() + /// t.column("bonus", .integer).notNull() + /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - expression: The generated expression. + /// - qualification: The generated column's qualification, which + /// defaults to ``GeneratedColumnQualification/virtual``. + /// - returns: `self` so that you can further refine the column definition. + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ (3.31 actually) + @discardableResult + public func generatedAs( + _ expression: some SQLExpressible, + _ qualification: GeneratedColumnQualification = .virtual) + -> Self + { + generatedColumnConstraint = GeneratedColumnConstraint( + expression: expression.sqlExpression, + qualification: qualification) + return self + } +#endif + + /// Adds a foreign key constraint. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE book( + /// // authorId INTEGER REFERENCES author(id) ON DELETE CASCADE + /// // ) + /// try db.create(table: "book") { t in + /// t.column("authorId", .integer).references("author", onDelete: .cascade) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - table: The referenced table. + /// - column: The referenced column in the referenced table. If not + /// specified, the column of the primary key of the referenced table + /// is used. + /// - deleteAction: Optional action when the referenced row is deleted. + /// - updateAction: Optional action when the referenced row is updated. + /// - isDeferred: A boolean value indicating whether the foreign key + /// constraint is deferred. + /// See . + /// - returns: `self` so that you can further refine the column definition. 
+ @discardableResult + public func references( + _ table: String, + column: String? = nil, + onDelete deleteAction: Database.ForeignKeyAction? = nil, + onUpdate updateAction: Database.ForeignKeyAction? = nil, + deferred isDeferred: Bool = false) -> Self + { + foreignKeyConstraints.append(ForeignKeyConstraint( + destinationTable: table, + destinationColumn: column, + deleteAction: deleteAction, + updateAction: updateAction, + isDeferred: isDeferred)) + return self + } + + func indexDefinition(in table: String, options: IndexOptions = []) -> IndexDefinition? { + switch indexing { + case .none: return nil + case .unique: return nil + case .index: + return IndexDefinition( + name: "\(table)_on_\(name)", + table: table, + expressions: [.column(name)], + options: options, + condition: nil) + } + } +} + +// Explicit non-conformance to Sendable: `ColumnDefinition` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension ColumnDefinition: Sendable { } diff --git a/GRDB/QueryInterface/Schema/Database+SchemaDefinition.swift b/GRDB/QueryInterface/Schema/Database+SchemaDefinition.swift new file mode 100644 index 0000000000..10b95ebc3a --- /dev/null +++ b/GRDB/QueryInterface/Schema/Database+SchemaDefinition.swift @@ -0,0 +1,694 @@ +extension Database { + + // MARK: - Database Schema + + /// Creates a database table. + /// + /// For example: + /// + /// ```swift + /// try db.create(table: "place") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("title", .text) + /// t.column("favorite", .boolean).notNull().default(false) + /// t.column("longitude", .double).notNull() + /// t.column("latitude", .double).notNull() + /// } + /// ``` + /// + /// Related SQLite documentation: + /// - + /// - + /// + /// - warning: This is a legacy interface that is preserved for backwards + /// compatibility. Use of this interface is not recommended: prefer + /// ``create(table:options:body:)`` instead. + /// + /// - parameters: + /// - name: The table name. + /// - temporary: If true, creates a temporary table. + /// - ifNotExists: If false (the default), an error is thrown if the + /// table already exists. Otherwise, the table is created unless it + /// already exists. + /// - withoutRowID: If true, uses WITHOUT ROWID optimization. + /// - body: A closure that defines table columns and constraints. + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + @_disfavoredOverload + public func create( + table name: String, + temporary: Bool = false, + ifNotExists: Bool = false, + withoutRowID: Bool = false, + body: (TableDefinition) throws -> Void) + throws + { + var options: TableOptions = [] + if temporary { options.insert(.temporary) } + if ifNotExists { options.insert(.ifNotExists) } + if withoutRowID { options.insert(.withoutRowID) } + try create(table: name, options: options, body: body) + } + + /// Creates a database table. + /// + /// ### Reference documentation + /// + /// SQLite has many reference documents about table creation. 
They are a + /// great learning material: + /// + /// - [CREATE TABLE](https://www.sqlite.org/lang_createtable.html) + /// - [Datatypes In SQLite](https://www.sqlite.org/datatype3.html) + /// - [SQLite Foreign Key Support](https://www.sqlite.org/foreignkeys.html) + /// - [The ON CONFLICT Clause](https://www.sqlite.org/lang_conflict.html) + /// - [Rowid Tables](https://www.sqlite.org/rowidtable.html) + /// - [The WITHOUT ROWID Optimization](https://www.sqlite.org/withoutrowid.html) + /// - [STRICT Tables](https://www.sqlite.org/stricttables.html) + /// + /// ### Usage + /// + /// ```swift + /// // CREATE TABLE place ( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // title TEXT, + /// // isFavorite BOOLEAN NOT NULL DEFAULT 0, + /// // latitude DOUBLE NOT NULL, + /// // longitude DOUBLE NOT NULL + /// // ) + /// try db.create(table: "place") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.column("title", .text) + /// t.column("isFavorite", .boolean).notNull().default(false) + /// t.column("longitude", .double).notNull() + /// t.column("latitude", .double).notNull() + /// } + /// ``` + /// + /// ### Configure table creation + /// + /// Use the `options` parameter to configure table creation + /// (see ``TableOptions``): + /// + /// ```swift + /// // CREATE TABLE player ( ... ) + /// try db.create(table: "player") { t in ... } + /// + /// // CREATE TEMPORARY TABLE player IF NOT EXISTS ( + /// try db.create(table: "player", options: [.temporary, .ifNotExists]) { t in ... } + /// ``` + /// + /// ### Add columns + /// + /// Add columns with their name and eventual type (`text`, `integer`, + /// `double`, `real`, `numeric`, `boolean`, `blob`, `date`, `datetime` + /// and `any`) - see ``Database/ColumnType``: + /// + /// ```swift + /// // CREATE TABLE example ( + /// // a, + /// // name TEXT, + /// // creationDate DATETIME, + /// try db.create(table: "example") { t in + /// t.column("a") + /// t.column("name", .text) + /// t.column("creationDate", .datetime) + /// ``` + /// + /// The `column()` method returns a ``ColumnDefinition`` that you can + /// further configure: + /// + /// ### Not null constraints, default values + /// + /// ```swift + /// // email TEXT NOT NULL, + /// t.column("email", .text).notNull() + /// + /// // name TEXT DEFAULT 'O''Reilly', + /// t.column("name", .text).defaults(to: "O'Reilly") + /// + /// // flag BOOLEAN NOT NULL DEFAULT 0, + /// t.column("flag", .boolean).notNull().defaults(to: false) + /// + /// // creationDate DATETIME DEFAULT CURRENT_TIMESTAMP, + /// t.column("creationDate", .datetime).defaults(sql: "CURRENT_TIMESTAMP") + /// ``` + /// + /// ### Primary, unique, and foreign keys + /// + /// Use an individual column as **primary**, **unique**, or **foreign key**. 
+    /// When defining a foreign key, the referenced column is the primary key of
+    /// the referenced table (unless you specify otherwise):
+    ///
+    /// ```swift
+    /// // id INTEGER PRIMARY KEY AUTOINCREMENT,
+    /// t.autoIncrementedPrimaryKey("id")
+    ///
+    /// // uuid TEXT NOT NULL PRIMARY KEY,
+    /// t.primaryKey("uuid", .text)
+    ///
+    /// // email TEXT UNIQUE,
+    /// t.column("email", .text)
+    ///     .unique()
+    ///
+    /// // countryCode TEXT REFERENCES country(code) ON DELETE CASCADE,
+    /// t.column("countryCode", .text)
+    ///     .references("country", onDelete: .cascade)
+    /// ```
+    ///
+    /// Primary, unique and foreign keys can also be defined on several columns:
+    ///
+    /// ```swift
+    /// // a INTEGER NOT NULL,
+    /// // b TEXT NOT NULL,
+    /// // PRIMARY KEY (a, b)
+    /// t.primaryKey {
+    ///     t.column("a", .integer)
+    ///     t.column("b", .text)
+    /// }
+    ///
+    /// // a INTEGER NOT NULL,
+    /// // b TEXT NOT NULL,
+    /// // PRIMARY KEY (a, b)
+    /// t.column("a", .integer).notNull()
+    /// t.column("b", .text).notNull()
+    /// t.primaryKey(["a", "b"])
+    ///
+    /// // a INTEGER,
+    /// // b TEXT,
+    /// // UNIQUE (a, b) ON CONFLICT REPLACE
+    /// t.column("a", .integer)
+    /// t.column("b", .text)
+    /// t.uniqueKey(["a", "b"], onConflict: .replace)
+    ///
+    /// // a INTEGER,
+    /// // b TEXT,
+    /// // FOREIGN KEY (a, b) REFERENCES parents(c, d)
+    /// t.column("a", .integer)
+    /// t.column("b", .text)
+    /// t.foreignKey(["a", "b"], references: "parents")
+    /// ```
+    ///
+    /// > Tip: when you need an integer primary key that automatically generates
+    /// > unique values, it is recommended that you use the
+    /// > ``TableDefinition/autoIncrementedPrimaryKey(_:onConflict:)`` method:
+    /// >
+    /// > ```swift
+    /// > try db.create(table: "example") { t in
+    /// >     t.autoIncrementedPrimaryKey("id")
+    /// >     ...
+    /// > }
+    /// > ```
+    /// >
+    /// > The reason for this recommendation is that auto-incremented primary
+    /// > keys forbid the reuse of ids. This prevents your app, or database
+    /// > observation features, from thinking that a row was updated, when it
+    /// > was actually deleted and replaced. Depending on your application
+    /// > needs, this may be acceptable. But usually it is not.
+    ///
+    /// ### Indexed columns
+    ///
+    /// ```swift
+    /// t.column("score", .integer).indexed()
+    /// ```
+    ///
+    /// For extra index options, see ``create(indexOn:columns:options:condition:)``.
+ /// + /// ### Generated columns + /// + /// See [Generated columns](https://sqlite.org/gencol.html) for + /// more information: + /// + /// ```swift + /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") + /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) + /// ``` + /// + /// ### Integrity checks + /// + /// SQLite will only let conforming rows in: + /// + /// ```swift + /// // name TEXT CHECK (LENGTH(name) > 0) + /// t.column("name", .text).check { length($0) > 0 } + /// + /// // score INTEGER CHECK (score > 0) + /// t.column("score", .integer).check(sql: "score > 0") + /// + /// // CHECK (a + b < 10), + /// t.check(Column("a") + Column("b") < 10) + /// + /// // CHECK (a + b < 10) + /// t.check(sql: "a + b < 10") + /// ``` + /// + /// ### Raw SQL columns and constraints + /// + /// Columns and constraints can be defined with raw sql: + /// + /// ```swift + /// t.column(sql: "name TEXT") + /// t.constraint(sql: "CHECK (a + b < 10)") + /// ``` + /// + /// ``SQL`` literals allow you to safely embed raw values in your SQL, + /// without any risk of syntax errors or SQL injection: + /// + /// ```swift + /// let defaultName = "O'Reilly" + /// t.column(literal: "name TEXT DEFAULT \(defaultName)") + /// + /// let forbiddenName = "admin" + /// t.constraint(literal: "CHECK (name <> \(forbiddenName))") + /// ``` + /// + /// - parameters: + /// - name: The table name. + /// - options: Table creation options. + /// - body: A closure that defines table columns and constraints. + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func create( + table name: String, + options: TableOptions = [], + body: (TableDefinition) throws -> Void) + throws + { + let table = TableDefinition( + name: name, + options: options) + try body(table) + let generator = try SQLTableGenerator(self, table: table) + let sql = try generator.sql(self) + try execute(sql: sql) + } + + /// Renames a database table. + /// + /// Related SQLite documentation: + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func rename(table name: String, to newName: String) throws { + try execute(sql: "ALTER TABLE \(name.quotedDatabaseIdentifier) RENAME TO \(newName.quotedDatabaseIdentifier)") + } + + /// Modifies a database table. + /// + /// For example: + /// + /// ```swift + /// try db.alter(table: "player") { t in + /// t.add(column: "url", .text) + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - name: The table name. + /// - body: A closure that defines table alterations. + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func alter(table name: String, body: (TableAlteration) -> Void) throws { + let alteration = TableAlteration(name: name) + body(alteration) + let generator = SQLTableAlterationGenerator(alteration) + let sql = try generator.sql(self) + try execute(sql: sql) + } + + /// Deletes a database table. + /// + /// Related SQLite documentation: + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func drop(table name: String) throws { + try execute(sql: "DROP TABLE \(name.quotedDatabaseIdentifier)") + } + + /// Creates a database view. 
+ /// + /// You can create a view with an ``SQLRequest``: + /// + /// ```swift + /// // CREATE VIEW hero AS SELECT * FROM player WHERE isHero == 1 + /// try db.create(view: "hero", as: SQLRequest(literal: """ + /// SELECT * FROM player WHERE isHero == 1 + /// """) + /// ``` + /// + /// You can also create a view with a ``QueryInterfaceRequest``: + /// + /// ```swift + /// // CREATE VIEW hero AS SELECT * FROM player WHERE isHero == 1 + /// try db.create( + /// view: "hero", + /// as: Player.filter(Column("isHero") == true)) + /// ``` + /// + /// When creating views in , it is not recommended to + /// use record types defined in the application. Instead of the `Player` + /// record type, prefer `Table("player")`: + /// + /// ```swift + /// // RECOMMENDED IN MIGRATIONS + /// try db.create( + /// view: "hero", + /// as: Table("player").filter(Column("isHero") == true)) + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - view: The view name. + /// - options: View creation options. + /// - columns: The columns of the view. If nil, the columns are the + /// columns of the request. + /// - request: The request that feeds the view. + public func create( + view name: String, + options: ViewOptions = [], + columns: [String]? = nil, + as request: SQLSubqueryable) + throws { + var literal: SQL = "CREATE " + + if options.contains(.temporary) { + literal += "TEMPORARY " + } + + literal += "VIEW " + + if options.contains(.ifNotExists) { + literal += "IF NOT EXISTS " + } + + literal += "\(identifier: name) " + + if let columns { + literal += "(" + literal += columns.map { "\(identifier: $0)" }.joined(separator: ", ") + literal += ") " + } + + literal += "AS \(request)" + + // CREATE VIEW does not support arguments, so make sure we use + // literal values. + let context = SQLGenerationContext(self, argumentsSink: .literalValues) + let sql = try literal.sql(context) + try execute(sql: sql) + } + + /// Creates a database view. + /// + /// For example: + /// + /// ```swift + /// // CREATE VIEW hero AS SELECT * FROM player WHERE isHero == 1 + /// try db.create(view: "hero", asLiteral: """ + /// SELECT * FROM player WHERE isHero == 1 + /// """) + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - view: The view name. + /// - options: View creation options. + /// - columns: The columns of the view. If nil, the columns are the + /// columns of the request. + /// - sqlLiteral: An `SQL` literal. + public func create( + view name: String, + options: ViewOptions = [], + columns: [String]? = nil, + asLiteral sqlLiteral: SQL) + throws { + try create(view: name, options: options, columns: columns, as: SQLRequest(literal: sqlLiteral)) + } + + /// Deletes a database view. + /// + /// Related SQLite documentation: + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func drop(view name: String) throws { + try execute(sql: "DROP VIEW \(name.quotedDatabaseIdentifier)") + } + + /// Creates an index on the specified table and columns. + /// + /// For example: + /// + /// ```swift + /// // CREATE INDEX index_player_on_email ON player(email) + /// try db.create(index: "index_player_on_email", on: "player", columns: ["email"]) + /// ``` + /// + /// SQLite can also index expressions () + /// and use specific collations. To create such an index, use + /// ``create(index:on:expressions:options:condition:)``. 
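A sketch of the view-creation API implemented above, showing why the `literalValues` sink matters: `CREATE VIEW` cannot carry `?` arguments, so interpolated values are embedded as literals (table and view names are illustrative):

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("score", .integer)
    }

    let minScore = 1000
    try db.create(
        view: "bestPlayer",
        options: .ifNotExists,
        columns: ["id", "score"],
        asLiteral: "SELECT id, score FROM player WHERE score >= \(minScore)")
    // CREATE VIEW IF NOT EXISTS "bestPlayer" ("id", "score")
    // AS SELECT id, score FROM player WHERE score >= 1000
}
```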
+ /// + /// Related SQLite documentation: + /// + /// - warning: This is a legacy interface that is preserved for backwards + /// compatibility. Use of this interface is not recommended: prefer + /// ``create(indexOn:columns:options:condition:)`` instead. + /// + /// - parameters: + /// - name: The index name. + /// - table: The name of the indexed table. + /// - columns: The indexed columns. + /// - unique: If true, creates a unique index. + /// - ifNotExists: If true, no error is thrown if index already exists. + /// - condition: If not nil, creates a partial index + /// (see ). + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + @_disfavoredOverload + public func create( + index name: String, + on table: String, + columns: [String], + unique: Bool = false, + ifNotExists: Bool = false, + condition: (any SQLExpressible)? = nil) + throws + { + var options: IndexOptions = [] + if ifNotExists { options.insert(.ifNotExists) } + if unique { options.insert(.unique) } + try create(index: name, on: table, columns: columns, options: options, condition: condition) + } + + /// Creates an index on the specified table and columns. + /// + /// For example: + /// + /// ```swift + /// // CREATE INDEX index_player_on_email ON player(email) + /// try db.create(index: "index_player_on_email", on: "player", columns: ["email"]) + /// ``` + /// + /// To create a unique index, specify the `.unique` option: + /// + /// ```swift + /// // CREATE UNIQUE INDEX index_player_on_email ON player(email) + /// try db.create(index: "index_player_on_email", on: "player", columns: ["email"], options: .unique) + /// ``` + /// + /// SQLite can also index expressions () + /// and use specific collations. To create such an index, use a raw SQL + /// query: + /// + /// ```swift + /// try db.execute(sql: "CREATE INDEX ...") + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - name: The index name. + /// - table: The name of the indexed table. + /// - columns: The indexed columns. + /// - options: Index creation options. + /// - condition: If not nil, creates a partial index + /// (see ). + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func create( + index name: String, + on table: String, + columns: [String], + options: IndexOptions = [], + condition: (any SQLExpressible)? = nil) + throws + { + let index = IndexDefinition( + name: name, + table: table, + expressions: columns.map { .column($0) }, + options: options, + condition: condition?.sqlExpression) + let generator = SQLIndexGenerator(index: index) + let sql = try generator.sql(self) + try execute(sql: sql) + } + + /// Creates an index on the specified table and expressions. + /// + /// This method can generally create indexes on expressions (see + /// ): + /// + /// ```swift + /// // CREATE INDEX txy ON t(x+y) + /// try db.create( + /// index: "txy", + /// on: "t", + /// expressions: [Column("x") + Column("y")]) + /// ``` + /// + /// In particular, you can specify the collation on indexed + /// columns (see ): + /// + /// ```swift + /// // CREATE INDEX index_player_name ON player(name COLLATE NOCASE) + /// try db.create( + /// index: "index_player_name", + /// on: "player", + /// expressions: [Column("name").collating(.nocase)]) + /// ``` + /// + /// - parameters: + /// - name: The index name. + /// - table: The name of the indexed table. + /// - expressions: The indexed expressions. + /// - options: Index creation options. + /// - condition: If not nil, creates a partial index + /// (see ). 
+ /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func create( + index name: String, + on table: String, + expressions: [any SQLExpressible], + options: IndexOptions = [], + condition: (any SQLExpressible)? = nil) + throws + { + let index = IndexDefinition( + name: name, + table: table, + expressions: expressions.map { $0.sqlExpression }, + options: options, + condition: condition?.sqlExpression) + let generator = SQLIndexGenerator(index: index) + let sql = try generator.sql(self) + try execute(sql: sql) + } + + /// Creates an index with a default name on the specified table and columns. + /// + /// The created index is named after the table and the column name(s): + /// + /// ```swift + /// // CREATE INDEX index_player_on_email ON player(email) + /// try db.create(indexOn: "player", columns: ["email"]) + /// ``` + /// + /// To create a unique index, specify the `.unique` option: + /// + /// ```swift + /// // CREATE UNIQUE INDEX index_player_on_email ON player(email) + /// try db.create(indexOn: "player", columns: ["email"], options: .unique) + /// ``` + /// + /// In order to specify the index name, use + /// ``create(index:on:columns:options:condition:)`` instead. + /// + /// SQLite can also index expressions () + /// and use specific collations. To create such an index, use + /// ``create(index:on:expressions:options:condition:)``. + /// + /// Related SQLite documentation: + /// + /// - parameters: + /// - table: The name of the indexed table. + /// - columns: The indexed columns. + /// - options: Index creation options. + /// - condition: If not nil, creates a partial index + /// (see ). + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func create( + indexOn table: String, + columns: [String], + options: IndexOptions = [], + condition: (any SQLExpressible)? = nil) + throws + { + try create( + index: Database.defaultIndexName(on: table, columns: columns), + on: table, + columns: columns, + options: options, + condition: condition) + } + + /// Deletes a database index. + /// + /// Related SQLite documentation: + /// + /// - parameter name: The index name. + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func drop(index name: String) throws { + try execute(sql: "DROP INDEX \(name.quotedDatabaseIdentifier)") + } + + /// Deletes the database index on the specified table and columns + /// if exactly one such index exists. + /// + /// - parameters: + /// - table: The name of the indexed table. + /// - columns: The indexed columns. + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func drop(indexOn table: String, columns: [String]) throws { + let lowercasedColumns = columns.map { $0.lowercased() } + let indexes = try indexes(on: table).filter { index in + index.columns.map({ $0.lowercased() }) == lowercasedColumns + } + if let index = indexes.first, indexes.count == 1 { + try drop(index: index.name) + } + } + + /// Deletes and recreates from scratch all indices that use this collation. + /// + /// This method is useful when the definition of a collation sequence + /// has changed. + /// + /// Related SQLite documentation: + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func reindex(collation: Database.CollationName) throws { + try execute(sql: "REINDEX \(collation.rawValue)") + } + + /// Deletes and recreates from scratch all indices that use this collation. 
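A sketch combining the convenience methods above (the index name in the comment follows `defaultIndexName`; `.localizedCompare` is one of GRDB's built-in collations):

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("email", .text)
    }
    try db.create(indexOn: "player", columns: ["email"])

    // Drops "index_player_on_email", the only index on player(email):
    try db.drop(indexOn: "player", columns: ["email"])

    // After the definition of a custom collation changed, rebuild the
    // indexes that use it:
    try db.reindex(collation: .localizedCompare)
}
```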
+ /// + /// This method is useful when the definition of a collation sequence + /// has changed. + /// + /// Related SQLite documentation: + /// + /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. + public func reindex(collation: DatabaseCollation) throws { + try reindex(collation: Database.CollationName(rawValue: collation.name)) + } +} + +/// View creation options +public struct ViewOptions: OptionSet, Sendable { + public let rawValue: Int + + public init(rawValue: Int) { self.rawValue = rawValue } + + /// Only creates the view if it does not already exist. + public static let ifNotExists = ViewOptions(rawValue: 1 << 0) + + /// Creates a temporary view. + public static let temporary = ViewOptions(rawValue: 1 << 1) +} diff --git a/GRDB/QueryInterface/Schema/ForeignKeyDefinition.swift b/GRDB/QueryInterface/Schema/ForeignKeyDefinition.swift new file mode 100644 index 0000000000..e7754a0527 --- /dev/null +++ b/GRDB/QueryInterface/Schema/ForeignKeyDefinition.swift @@ -0,0 +1,111 @@ +/// Describes an association in the database schema. +/// +/// You get instances of `ForeignKeyDefinition` when you create a database +/// tables. For example: +/// +/// ```swift +/// try db.create(table: "player") { t in +/// t.belongsTo("team") // ForeignKeyDefinition +/// } +/// ``` +/// +/// See ``TableDefinition/belongsTo(_:inTable:onDelete:onUpdate:deferred:indexed:)``. +public final class ForeignKeyDefinition { + enum Indexing { + case index + case unique + } + + var name: String + var table: String? + var deleteAction: Database.ForeignKeyAction? + var updateAction: Database.ForeignKeyAction? + var indexing: Indexing? + var isDeferred: Bool + var notNullConflictResolution: Database.ConflictResolution? + + init( + name: String, + table: String?, + deleteAction: Database.ForeignKeyAction?, + updateAction: Database.ForeignKeyAction?, + isIndexed: Bool, + isDeferred: Bool) + { + self.name = name + self.table = table + self.deleteAction = deleteAction + self.updateAction = updateAction + self.indexing = isIndexed ? .index : nil + self.isDeferred = isDeferred + } + + /// Adds a not null constraint. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // teamId INTEGER NOT NULL REFERENCES team(id) + /// // ) + /// try db.create(table: "player") { t in + /// t.belongsTo("team").notNull() + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter conflictResolution: An optional ``Database/ConflictResolution``. + /// - returns: `self` so that you can further refine the definition of + /// the association. + @discardableResult + public func notNull(onConflict conflictResolution: Database.ConflictResolution? = nil) -> Self { + notNullConflictResolution = conflictResolution ?? .abort + return self + } + + /// Adds a unique constraint. + /// + /// For example: + /// + /// ```swift + /// // CREATE TABLE player( + /// // teamId INTEGER UNIQUE REFERENCES team(id) + /// // ) + /// try db.create(table: "player") { t in + /// t.belongsTo("team").unique() + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - returns: `self` so that you can further refine the definition of + /// the association. 
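The table-name resolution implemented in `primaryKey(_:)` just below tries the exact name first, then its pluralized form, which lets a singular association name target a plural table. A sketch:

```swift
import GRDB

let dbQueue = try DatabaseQueue(path: ":memory:")
try dbQueue.write { db in
    // Note the plural table name:
    try db.create(table: "teams") { t in
        t.autoIncrementedPrimaryKey("id")
    }
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        // "team" is resolved against the existing "teams" table:
        // teamId INTEGER REFERENCES teams(id)
        t.belongsTo("team")
    }
}
```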
+    @discardableResult
+    public func unique() -> Self {
+        indexing = .unique
+        return self
+    }
+
+    func primaryKey(_ db: Database) throws -> SQLPrimaryKeyDescriptor {
+        if let table {
+            return try SQLPrimaryKeyDescriptor.find(db, table: table)
+        }
+
+        if try db.tableExists(name) {
+            return try SQLPrimaryKeyDescriptor.find(db, table: name)
+        }
+
+        let pluralizedName = name.pluralized
+        if try db.tableExists(pluralizedName) {
+            return try SQLPrimaryKeyDescriptor.find(db, table: pluralizedName)
+        }
+
+        throw DatabaseError.noSuchTable(name)
+    }
+}
+
+// Explicit non-conformance to Sendable: `ForeignKeyDefinition` is a mutable
+// class and there is no known reason for making it thread-safe.
+@available(*, unavailable)
+extension ForeignKeyDefinition: Sendable { }
diff --git a/GRDB/QueryInterface/Schema/IndexDefinition.swift b/GRDB/QueryInterface/Schema/IndexDefinition.swift
new file mode 100644
index 0000000000..567c4beffd
--- /dev/null
+++ b/GRDB/QueryInterface/Schema/IndexDefinition.swift
@@ -0,0 +1,26 @@
+struct IndexDefinition {
+    let name: String
+    let table: String
+    let expressions: [SQLExpression]
+    let options: IndexOptions
+    let condition: SQLExpression?
+}
+
+/// Index creation options
+public struct IndexOptions: OptionSet, Sendable {
+    public let rawValue: Int
+
+    public init(rawValue: Int) { self.rawValue = rawValue }
+
+    /// Only creates the index if it does not already exist.
+    public static let ifNotExists = IndexOptions(rawValue: 1 << 0)
+
+    /// Creates a unique index.
+    public static let unique = IndexOptions(rawValue: 1 << 1)
+}
+
+extension Database {
+    static func defaultIndexName(on table: String, columns: [String]) -> String {
+        "index_\(table)_on_\(columns.joined(separator: "_"))"
+    }
+}
diff --git a/GRDB/QueryInterface/Schema/TableAlteration.swift b/GRDB/QueryInterface/Schema/TableAlteration.swift
new file mode 100644
index 0000000000..c731e7ea54
--- /dev/null
+++ b/GRDB/QueryInterface/Schema/TableAlteration.swift
@@ -0,0 +1,168 @@
+/// A `TableAlteration` lets you modify the components of a database table.
+///
+/// You don't create instances of this class. Instead, you use the `Database`
+/// ``Database/alter(table:body:)`` method:
+///
+/// ```swift
+/// try db.alter(table: "player") { t in // t is TableAlteration
+///     t.add(column: "bonus", .integer)
+/// }
+/// ```
+///
+/// Related SQLite documentation:
+public final class TableAlteration {
+    let name: String
+
+    enum TableAlterationKind {
+        case add(ColumnDefinition)
+        case addColumnLiteral(SQL)
+        case rename(old: String, new: String)
+        case drop(String)
+    }
+
+    var alterations: [TableAlterationKind] = []
+
+    init(name: String) {
+        self.name = name
+    }
+
+    /// Appends a column.
+    ///
+    /// For example:
+    ///
+    /// ```swift
+    /// // ALTER TABLE player ADD COLUMN bonus integer
+    /// try db.alter(table: "player") { t in
+    ///     t.add(column: "bonus", .integer)
+    /// }
+    /// ```
+    ///
+    /// Related SQLite documentation:
+    ///
+    /// - parameter name: the column name.
+    /// - parameter type: the column type.
+    /// - returns: A ``ColumnDefinition`` that allows you to refine the
+    ///   column definition.
+    @discardableResult
+    public func add(column name: String, _ type: Database.ColumnType? = nil) -> ColumnDefinition {
+        let column = ColumnDefinition(name: name, type: type)
+        alterations.append(.add(column))
+        return column
+    }
+
+    /// Appends a column.
+ /// + /// For example: + /// + /// ```swift + /// // ALTER TABLE player ADD COLUMN bonus integer + /// try db.alter(table: "player") { t in + /// t.addColumn(sql: "bonus integer") + /// } + /// ``` + public func addColumn(sql: String) { + alterations.append(.addColumnLiteral(SQL(sql: sql))) + } + + /// Appends a column. + /// + /// ``SQL`` literals allow you to safely embed raw values in your SQL, + /// without any risk of syntax errors or SQL injection: + /// + /// ```swift + /// // ALTER TABLE player ADD COLUMN name TEXT DEFAULT 'Anonymous' + /// try db.alter(table: "player") { t in + /// t.addColumn(literal: "name TEXT DEFAULT \(defaultName)") + /// } + /// ``` + public func addColumn(literal: SQL) { + alterations.append(.addColumnLiteral(literal)) + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + /// Renames a column. + /// + /// For example: + /// + /// ```swift + /// try db.alter(table: "player") { t in + /// t.rename(column: "url", to: "homeURL") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter name: the old name of the column. + /// - parameter newName: the new name of the column. + public func rename(column name: String, to newName: String) { + _rename(column: name, to: newName) + } + + /// Drops a column. + /// + /// For example: + /// + /// ```swift + /// try db.alter(table: "player") { t in + /// t.drop(column: "age") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - Parameter name: the name of the column to drop. + public func drop(column name: String) { + _drop(column: name) + } +#else + /// Renames a column. + /// + /// For example: + /// + /// ```swift + /// try db.alter(table: "player") { t in + /// t.rename(column: "url", to: "homeURL") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - parameter name: the old name of the column. + /// - parameter newName: the new name of the column. + @available(iOS 13, tvOS 13, watchOS 6, *) // SQLite 3.25+ + public func rename(column name: String, to newName: String) { + _rename(column: name, to: newName) + } + + /// Drops a column. + /// + /// For example: + /// + /// ```swift + /// try db.alter(table: "player") { t in + /// t.drop(column: "age") + /// } + /// ``` + /// + /// Related SQLite documentation: + /// + /// - Parameter name: the name of the column to drop. + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ + public func drop(column name: String) { + _drop(column: name) + } +#endif + + private func _rename(column name: String, to newName: String) { + alterations.append(.rename(old: name, new: newName)) + } + + private func _drop(column name: String) { + alterations.append(.drop(name)) + } +} + +// Explicit non-conformance to Sendable: `TableAlteration` is a mutable +// class and there is no known reason for making it thread-safe. +@available(*, unavailable) +extension TableAlteration: Sendable { } diff --git a/GRDB/QueryInterface/Schema/TableDefinition.swift b/GRDB/QueryInterface/Schema/TableDefinition.swift index 9beba1c2c1..235c54bf88 100644 --- a/GRDB/QueryInterface/Schema/TableDefinition.swift +++ b/GRDB/QueryInterface/Schema/TableDefinition.swift @@ -1,447 +1,5 @@ -extension Database { - - // MARK: - Database Schema - - // TODO: deprecate just before GRDB 6 - /// Creates a database table. 
- /// - /// For example: - /// - /// ```swift - /// try db.create(table: "place") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("title", .text) - /// t.column("favorite", .boolean).notNull().default(false) - /// t.column("longitude", .double).notNull() - /// t.column("latitude", .double).notNull() - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - - /// - - /// - /// - warning: This is a legacy interface that is preserved for backwards - /// compatibility. Use of this interface is not recommended: prefer - /// ``create(table:options:body:)`` instead. - /// - /// - parameters: - /// - name: The table name. - /// - temporary: If true, creates a temporary table. - /// - ifNotExists: If false (the default), an error is thrown if the - /// table already exists. Otherwise, the table is created unless it - /// already exists. - /// - withoutRowID: If true, uses WITHOUT ROWID optimization. - /// - body: A closure that defines table columns and constraints. - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - @_disfavoredOverload - public func create( - table name: String, - temporary: Bool = false, - ifNotExists: Bool = false, - withoutRowID: Bool = false, - body: (TableDefinition) throws -> Void) - throws - { - var options: TableOptions = [] - if temporary { options.insert(.temporary) } - if ifNotExists { options.insert(.ifNotExists) } - if withoutRowID { options.insert(.withoutRowID) } - try create(table: name, options: options, body: body) - } - - /// Creates a database table. - /// - /// ### Reference documentation - /// - /// SQLite has many reference documents about table creation. They are a - /// great learning material: - /// - /// - [CREATE TABLE](https://www.sqlite.org/lang_createtable.html) - /// - [Datatypes In SQLite](https://www.sqlite.org/datatype3.html) - /// - [SQLite Foreign Key Support](https://www.sqlite.org/foreignkeys.html) - /// - [The ON CONFLICT Clause](https://www.sqlite.org/lang_conflict.html) - /// - [Rowid Tables](https://www.sqlite.org/rowidtable.html) - /// - [The WITHOUT ROWID Optimization](https://www.sqlite.org/withoutrowid.html) - /// - [STRICT Tables](https://www.sqlite.org/stricttables.html) - /// - /// ### Usage - /// - /// ```swift - /// // CREATE TABLE place ( - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// // title TEXT, - /// // isFavorite BOOLEAN NOT NULL DEFAULT 0, - /// // latitude DOUBLE NOT NULL, - /// // longitude DOUBLE NOT NULL - /// // ) - /// try db.create(table: "place") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("title", .text) - /// t.column("isFavorite", .boolean).notNull().default(false) - /// t.column("longitude", .double).notNull() - /// t.column("latitude", .double).notNull() - /// } - /// ``` - /// - /// ### Configure table creation - /// - /// Use the `options` parameter to configure table creation - /// (see ``TableOptions``): - /// - /// ```swift - /// // CREATE TABLE player ( ... ) - /// try db.create(table: "player") { t in ... } - /// - /// // CREATE TEMPORARY TABLE player IF NOT EXISTS ( - /// try db.create(table: "player", options: [.temporary, .ifNotExists]) { t in ... 
} - /// ``` - /// - /// ### Add columns - /// - /// Add columns with their name and eventual type (`text`, `integer`, - /// `double`, `real`, `numeric`, `boolean`, `blob`, `date`, `datetime` - /// and `any`) - see ``Database/ColumnType``: - /// - /// ```swift - /// // CREATE TABLE example ( - /// // a, - /// // name TEXT, - /// // creationDate DATETIME, - /// try db.create(table: "example") { t in - /// t.column("a") - /// t.column("name", .text) - /// t.column("creationDate", .datetime) - /// ``` - /// - /// The `column()` method returns a ``ColumnDefinition`` that you can - /// further configure: - /// - /// ### Not null constraints, default values - /// - /// ```swift - /// // email TEXT NOT NULL, - /// t.column("email", .text).notNull() - /// - /// // name TEXT DEFAULT 'O''Reilly', - /// t.column("name", .text).defaults(to: "O'Reilly") - /// - /// // flag BOOLEAN NOT NULL DEFAULT 0, - /// t.column("flag", .boolean).notNull().defaults(to: false) - /// - /// // creationDate DATETIME DEFAULT CURRENT_TIMESTAMP, - /// t.column("creationDate", .datetime).defaults(sql: "CURRENT_TIMESTAMP") - /// ``` - /// - /// ### Primary, unique, and foreign keys - /// - /// Use an individual column as **primary**, **unique**, or **foreign key**. - /// When defining a foreign key, the referenced column is the primary key of - /// the referenced table (unless you specify otherwise): - /// - /// ```swift - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// t.autoIncrementedPrimaryKey("id") - /// - /// // uuid TEXT NOT NULL PRIMARY KEY, - /// t.primaryKey("uuid", .text) - /// - /// // email TEXT UNIQUE, - /// t.column("email", .text) - /// .unique() - /// - /// // countryCode TEXT REFERENCES country(code) ON DELETE CASCADE, - /// t.column("countryCode", .text) - /// .references("country", onDelete: .cascade) - /// ``` - /// - /// Primary, unique and foreign keys can also be added on several columns: - /// - /// ```swift - /// // a INTEGER NOT NULL, - /// // b TEXT NOT NULL, - /// // PRIMARY KEY (a, b), - /// t.primaryKey { - /// t.column("a", .integer) - /// t.column("b", .text) - /// } - /// - /// // a INTEGER, - /// // b TEXT, - /// // UNIQUE (a, b) ON CONFLICT REPLACE, - /// t.column("a", .integer) - /// t.column("b", .text) - /// t.uniqueKey(["a", "b"], onConflict: .replace) - /// - /// // a INTEGER, - /// // b TEXT, - /// // FOREIGN KEY (a, b) REFERENCES parents(c, d), - /// t.column("a", .integer) - /// t.column("b", .text) - /// t.foreignKey(["a", "b"], references: "parents") - /// ``` - /// - /// > Tip: when you need an integer primary key that automatically generates - /// unique values, it is recommended that you use the - /// ``TableDefinition/autoIncrementedPrimaryKey(_:onConflict:)`` method: - /// > - /// > ```swift - /// > try db.create(table: "example") { t in - /// > t.autoIncrementedPrimaryKey("id") - /// > ... - /// > } - /// > ``` - /// > - /// > The reason for this recommendation is that auto-incremented primary - /// > keys forbid the reuse of ids. This prevents your app or - /// > to think that a row was updated, when it was - /// > actually deleted and replaced. Depending on your application needs, - /// > this may be acceptable. But usually it is not. - /// - /// ### Indexed columns - /// - /// ```swift - /// t.column("score", .integer).indexed() - /// ``` - /// - /// For extra index options, see ``create(index:on:columns:options:condition:)``. 
- /// - /// ### Generated columns - /// - /// See [Generated columns](https://sqlite.org/gencol.html) for - /// more information: - /// - /// ```swift - /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") - /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) - /// ``` - /// - /// ### Integrity checks - /// - /// SQLite will only let conforming rows in: - /// - /// ```swift - /// // name TEXT CHECK (LENGTH(name) > 0) - /// t.column("name", .text).check { length($0) > 0 } - /// - /// // score INTEGER CHECK (score > 0) - /// t.column("score", .integer).check(sql: "score > 0") - /// - /// // CHECK (a + b < 10), - /// t.check(Column("a") + Column("b") < 10) - /// - /// // CHECK (a + b < 10) - /// t.check(sql: "a + b < 10") - /// ``` - /// - /// ### Raw SQL columns and constraints - /// - /// Columns and constraints can be defined with raw sql: - /// - /// ```swift - /// t.column(sql: "name TEXT") - /// t.constraint(sql: "CHECK (a + b < 10)") - /// ``` - /// - /// ``SQL`` literals allow you to safely embed raw values in your SQL, - /// without any risk of syntax errors or SQL injection: - /// - /// ```swift - /// let defaultName = "O'Reilly" - /// t.column(literal: "name TEXT DEFAULT \(defaultName)") - /// - /// let forbiddenName = "admin" - /// t.constraint(literal: "CHECK (name <> \(forbiddenName))") - /// ``` - /// - /// - parameters: - /// - name: The table name. - /// - options: Table creation options. - /// - body: A closure that defines table columns and constraints. - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func create( - table name: String, - options: TableOptions = [], - body: (TableDefinition) throws -> Void) - throws - { - let definition = TableDefinition( - name: name, - options: options) - try body(definition) - let sql = try definition.sql(self) - try execute(sql: sql) - } - - /// Renames a database table. - /// - /// Related SQLite documentation: - /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func rename(table name: String, to newName: String) throws { - try execute(sql: "ALTER TABLE \(name.quotedDatabaseIdentifier) RENAME TO \(newName.quotedDatabaseIdentifier)") - } - - /// Modifies a database table. - /// - /// For example: - /// - /// ```swift - /// try db.alter(table: "player") { t in - /// t.add(column: "url", .text) - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - name: The table name. - /// - body: A closure that defines table alterations. - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func alter(table name: String, body: (TableAlteration) -> Void) throws { - let alteration = TableAlteration(name: name) - body(alteration) - let sql = try alteration.sql(self) - try execute(sql: sql) - } - - /// Deletes a database table. - /// - /// Related SQLite documentation: - /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func drop(table name: String) throws { - try execute(sql: "DROP TABLE \(name.quotedDatabaseIdentifier)") - } - - // TODO: deprecate just before GRDB 6 - /// Creates an index. - /// - /// For example: - /// - /// ```swift - /// try db.create(index: "playerByEmail", on: "player", columns: ["email"]) - /// ``` - /// - /// SQLite can also index expressions () - /// and use specific collations. 
To create such an index, use a raw SQL - /// query: - /// - /// ```swift - /// try db.execute(sql: "CREATE INDEX ...") - /// ``` - /// - /// Related SQLite documentation: - /// - /// - warning: This is a legacy interface that is preserved for backwards - /// compatibility. Use of this interface is not recommended: prefer - /// ``create(index:on:columns:options:condition:)`` instead. - /// - /// - parameters: - /// - name: The index name. - /// - table: The name of the indexed table. - /// - columns: The indexed columns. - /// - unique: If true, creates a unique index. - /// - ifNotExists: If true, no error is thrown if index already exists. - /// - condition: If not nil, creates a partial index - /// (see ). - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - @_disfavoredOverload - public func create( - index name: String, - on table: String, - columns: [String], - unique: Bool = false, - ifNotExists: Bool = false, - condition: (any SQLExpressible)? = nil) - throws - { - var options: IndexOptions = [] - if ifNotExists { options.insert(.ifNotExists) } - if unique { options.insert(.unique) } - try create(index: name, on: table, columns: columns, options: options, condition: condition) - } - - /// Creates an index. - /// - /// For example: - /// - /// ```swift - /// try db.create(index: "playerByEmail", on: "player", columns: ["email"]) - /// ``` - /// - /// SQLite can also index expressions () - /// and use specific collations. To create such an index, use a raw SQL - /// query: - /// - /// ```swift - /// try db.execute(sql: "CREATE INDEX ...") - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - name: The index name. - /// - table: The name of the indexed table. - /// - columns: The indexed columns. - /// - options: Index creation options. - /// - condition: If not nil, creates a partial index - /// (see ). - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func create( - index name: String, - on table: String, - columns: [String], - options: IndexOptions = [], - condition: (any SQLExpressible)? = nil) - throws - { - let definition = IndexDefinition( - name: name, - table: table, - columns: columns, - options: options, - condition: condition?.sqlExpression) - let sql = try definition.sql(self) - try execute(sql: sql) - } - - /// Deletes a database index. - /// - /// Related SQLite documentation: - /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func drop(index name: String) throws { - try execute(sql: "DROP INDEX \(name.quotedDatabaseIdentifier)") - } - - /// Deletes and recreates from scratch all indices that use this collation. - /// - /// This method is useful when the definition of a collation sequence - /// has changed. - /// - /// Related SQLite documentation: - /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func reindex(collation: Database.CollationName) throws { - try execute(sql: "REINDEX \(collation.rawValue)") - } - - /// Deletes and recreates from scratch all indices that use this collation. - /// - /// This method is useful when the definition of a collation sequence - /// has changed. - /// - /// Related SQLite documentation: - /// - /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. - public func reindex(collation: DatabaseCollation) throws { - try reindex(collation: Database.CollationName(rawValue: collation.name)) - } -} - /// Table creation options. 
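+///
+/// For example, a sketch combining two options (the table body is elided):
+///
+/// ```swift
+/// // CREATE TEMPORARY TABLE IF NOT EXISTS player ( ... )
+/// try db.create(table: "player", options: [.temporary, .ifNotExists]) { t in ... }
+/// ```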
-public struct TableOptions: OptionSet {
+public struct TableOptions: OptionSet, Sendable {
     public let rawValue: Int
 
     public init(rawValue: Int) { self.rawValue = rawValue }
@@ -469,8 +27,8 @@ public struct TableOptions: OptionSet {
 
 /// A `TableDefinition` lets you define the components of a database table.
 ///
-/// You don't create instances of this class. Instead, you use the `Database`
-/// ``Database/create(table:options:body:)`` method:
+/// See the documentation of the `Database`
+/// ``Database/create(table:options:body:)`` method for usage information:
 ///
 /// ```swift
 /// try db.create(table: "player") { t in // t is TableDefinition
@@ -493,10 +51,13 @@ public struct TableOptions: OptionSet {
 /// - ``autoIncrementedPrimaryKey(_:onConflict:)``
 /// - ``primaryKey(_:_:onConflict:)``
 /// - ``primaryKey(onConflict:body:)``
+/// - ``primaryKey(_:onConflict:)``
 ///
 /// ### Define a Foreign Key
 ///
+/// - ``belongsTo(_:inTable:onDelete:onUpdate:deferred:indexed:)``
 /// - ``foreignKey(_:references:columns:onDelete:onUpdate:deferred:)``
+/// - ``ForeignKeyDefinition``
 ///
 /// ### Define a Unique Key
 ///
@@ -504,70 +65,56 @@ public struct TableOptions: OptionSet {
 ///
 /// ### Define Other Constraints
 ///
-/// - ``check(_:)``
+/// - ``check(_:)-6u1za``
+/// - ``check(_:)-jpcg``
 /// - ``check(sql:)``
 /// - ``constraint(literal:)``
 /// - ``constraint(sql:)``
-///
-/// ### Sunsetted Methods
-///
-/// Those are legacy interfaces that are preserved for backwards compatibility.
-/// Their use is not recommended.
-///
-/// - ``primaryKey(_:onConflict:)``
 public final class TableDefinition {
     struct KeyConstraint {
-        var columns: [String]
+        enum Component {
+            case columnName(String)
+            case columnDefinition(ColumnDefinition)
+            case foreignKeyDefinition(ForeignKeyDefinition)
+        }
+        var components: [Component]
         var conflictResolution: Database.ConflictResolution?
-    }
-    
-    private struct ForeignKeyConstraint {
-        var columns: [String]
-        var table: String
-        var destinationColumns: [String]?
-        var deleteAction: Database.ForeignKeyAction?
-        var updateAction: Database.ForeignKeyAction?
-        var deferred: Bool
-    }
-    
-    private enum ColumnItem {
-        case definition(ColumnDefinition)
-        case literal(SQL)
-        
-        var columnDefinition: ColumnDefinition? {
-            switch self {
-            case let .definition(def): return def
-            case .literal: return nil
-            }
+        init(components: [Component], conflictResolution: Database.ConflictResolution?) {
+            self.components = components
+            self.conflictResolution = conflictResolution
         }
-        
-        func sql(_ db: Database, tableName: String, primaryKeyColumns: [String]?) throws -> String {
-            switch self {
-            case let .definition(def):
-                return try def.sql(db, tableName: tableName, primaryKeyColumns: primaryKeyColumns)
-            case let .literal(sqlLiteral):
-                let context = SQLGenerationContext(db, argumentsSink: .forRawSQL)
-                return try sqlLiteral.sql(context)
+        init(columns: [String], conflictResolution: Database.ConflictResolution?) {
+            let components = columns.map { name in
+                Component.columnName(name)
             }
+            self.init(components: components, conflictResolution: conflictResolution)
         }
     }
-    
-    private let name: String
-    private let options: TableOptions
-    private var columns: [ColumnItem] = []
-    private var inPrimaryKeyBody = false
-    private var primaryKeyConstraint: KeyConstraint?
- private var uniqueKeyConstraints: [KeyConstraint] = [] - private var foreignKeyConstraints: [ForeignKeyConstraint] = [] - private var checkConstraints: [SQLExpression] = [] - private var literalConstraints: [SQL] = [] + enum ColumnComponent { + case columnDefinition(ColumnDefinition) + case columnLiteral(SQL) + case foreignKeyDefinition(ForeignKeyDefinition) + case foreignKeyConstraint(SQLForeignKeyConstraint) + } + + let name: String + let options: TableOptions + var columnComponents: [ColumnComponent] = [] + var inPrimaryKeyBody = false + var primaryKeyConstraint: KeyConstraint? + var uniqueKeyConstraints: [KeyConstraint] = [] + var checkConstraints: [SQLExpression] = [] + var literalConstraints: [SQL] = [] init(name: String, options: TableOptions) { self.name = name self.options = options } - /// Defines the auto-incremented primary key. + /// Appends an auto-incremented primary key column. /// /// For example: /// @@ -601,7 +148,7 @@ public final class TableDefinition { column(name, .integer).primaryKey(onConflict: conflictResolution, autoincrement: true) } - /// Defines the primary key on a single column. + /// Appends a primary key column. /// /// For example: /// @@ -636,7 +183,7 @@ public final class TableDefinition { } } - /// Defines the primary key on multiple columns. + /// Defines the primary key on wrapped columns. /// /// For example: /// @@ -656,7 +203,7 @@ public final class TableDefinition { /// } /// ``` /// - /// A NOT NULL constraint is always added to the primary key columns. + /// A NOT NULL constraint is always added to the wrapped primary key columns. public func primaryKey( onConflict conflictResolution: Database.ConflictResolution? = nil, body: () throws -> Void) @@ -666,7 +213,7 @@ public final class TableDefinition { // Programmer error fatalError("can't define several primary keys") } - primaryKeyConstraint = KeyConstraint(columns: [], conflictResolution: conflictResolution) + primaryKeyConstraint = KeyConstraint(components: [], conflictResolution: conflictResolution) let oldValue = inPrimaryKeyBody inPrimaryKeyBody = true @@ -696,13 +243,13 @@ public final class TableDefinition { @discardableResult public func column(_ name: String, _ type: Database.ColumnType? = nil) -> ColumnDefinition { let column = ColumnDefinition(name: name, type: type) - columns.append(.definition(column)) + columnComponents.append(.columnDefinition(column)) if inPrimaryKeyBody { // Add a not null constraint in order to fix an SQLite bug: // column.notNull() - primaryKeyConstraint!.columns.append(name) + primaryKeyConstraint!.components.append(.columnDefinition(column)) } return column @@ -721,8 +268,7 @@ public final class TableDefinition { /// } /// ``` public func column(sql: String) { - GRDBPrecondition(!inPrimaryKeyBody, "Primary key columns can not be defined with raw SQL") - columns.append(.literal(SQL(sql: sql))) + column(literal: SQL(sql: sql)) } /// Appends a table column. @@ -741,10 +287,10 @@ public final class TableDefinition { /// ``` public func column(literal: SQL) { GRDBPrecondition(!inPrimaryKeyBody, "Primary key columns can not be defined with raw SQL") - columns.append(.literal(literal)) + columnComponents.append(.columnLiteral(literal)) } - /// Defines the primary key. + /// Adds a primary key constraint. /// /// For example: /// @@ -766,10 +312,6 @@ public final class TableDefinition { /// See /// for more information. /// - /// - warning: This is a legacy interface that is preserved for backwards - /// compatibility. 
Use of this interface is not recommended: prefer - /// ``TableDefinition/primaryKey(onConflict:body:)`` instead. - /// /// - parameter columns: The primary key columns. /// - parameter conflictResolution: An optional conflict resolution /// (see ). @@ -860,7 +402,8 @@ public final class TableDefinition { /// are used. /// - deleteAction: Optional action when the referenced row is deleted. /// - updateAction: Optional action when the referenced row is updated. - /// - deferred: If true, defines a deferred foreign key constraint. + /// - isDeferred: A boolean value indicating whether the foreign key + /// constraint is deferred. /// See . public func foreignKey( _ columns: [String], @@ -868,706 +411,195 @@ public final class TableDefinition { columns destinationColumns: [String]? = nil, onDelete deleteAction: Database.ForeignKeyAction? = nil, onUpdate updateAction: Database.ForeignKeyAction? = nil, - deferred: Bool = false) + deferred isDeferred: Bool = false) { - foreignKeyConstraints.append(ForeignKeyConstraint( - columns: columns, - table: table, - destinationColumns: destinationColumns, - deleteAction: deleteAction, - updateAction: updateAction, - deferred: deferred)) + let foreignKeyConstraint = SQLForeignKeyConstraint( + columns: columns, + destinationTable: table, + destinationColumns: destinationColumns, + deleteAction: deleteAction, + updateAction: updateAction, + isDeferred: isDeferred) + columnComponents.append(.foreignKeyConstraint(foreignKeyConstraint)) } - /// Adds a check constraint. + /// Declares an association to another table. /// - /// For example: + /// `belongsTo` appends as many columns as there are columns in the + /// primary key of the referenced table, and declares a foreign key that + /// guarantees schema integrity. All primary keys are supported, + /// including composite primary keys that span several columns, and the + /// hidden `rowid` column. + /// + /// Added columns are prefixed with `name`, and end with the name of the + /// matching column in the primary key of the referenced table. 
In the + /// following example, `belongsTo("team")` adds a `teamId` column, and + /// `belongsTo("country")` adds a `countryCode` column: /// /// ```swift + /// try db.create(table: "team") { t in + /// t.autoIncrementedPrimaryKey("id") + /// } + /// try db.create(table: "country") { t in + /// t.primaryKey("code", .text) + /// } + /// /// // CREATE TABLE player ( - /// // personalPhone TEXT, - /// // workPhone TEXT, - /// // CHECK personalPhone IS NOT NULL OR workPhone IS NOT NULL + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // teamId INTEGER REFERENCES team(id), + /// // countryCode TEXT NOT NULL REFERENCES country(code), /// // ) /// try db.create(table: "player") { t in - /// t.column("personalPhone", .text) - /// t.column("workPhone", .text) - /// let personalPhone = Column("personalPhone") - /// let workPhone = Column("workPhone") - /// t.check(personalPhone != nil || workPhone != nil) + /// t.autoIncrementedPrimaryKey("id") + /// t.belongsTo("team") + /// t.belongsTo("country").notNull() /// } /// ``` /// - /// When defining a check constraint on a single column, you can use the - /// ``ColumnDefinition/check(_:)`` shortcut: + /// When in doubt, you can check the names of the created columns: /// /// ```swift - /// // CREATE TABLE player( - /// // name TEXT CHECK (LENGTH(name) > 0) - /// // ) - /// try db.create(table: "player") { t in - /// t.column("name", .text).check { length($0) > 0 } - /// } + /// // Prints ["id", "teamId", "countryCode"] + /// try print(db.columns(in: "player").map(\.name)) /// ``` /// - /// Related SQLite documentation: + /// Singular names can refer to database tables whose name is plural: /// - /// - parameter condition: The checked condition. - public func check(_ condition: some SQLExpressible) { - checkConstraints.append(condition.sqlExpression) - } - - /// Adds a check constraint. 
+ /// ```swift + /// try db.create(table: "teams") { t in + /// t.autoIncrementedPrimaryKey("id") + /// } + /// try db.create(table: "countries") { t in + /// t.primaryKey("code", .text) + /// } /// - /// For example: + /// // CREATE TABLE players ( + /// // teamId INTEGER REFERENCES teams(id), + /// // countryCode TEXT REFERENCES countries(code), + /// // ) + /// try db.create(table: "players") { t in + /// t.belongsTo("team") + /// t.belongsTo("country") + /// } + /// ``` + /// + /// When the added columns should have a custom prefix, specify an + /// explicit table name: /// /// ```swift /// // CREATE TABLE player ( - /// // personalPhone TEXT, - /// // workPhone TEXT, - /// // CHECK personalPhone IS NOT NULL OR workPhone IS NOT NULL + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // captainId INTEGER REFERENCES player(id), /// // ) /// try db.create(table: "player") { t in - /// t.column("personalPhone", .text) - /// t.column("workPhone", .text) - /// t.check(sql: "personalPhone IS NOT NULL OR workPhone IS NOT NULL") + /// t.autoIncrementedPrimaryKey("id") + /// t.belongsTo("captain", inTable: "player") + /// } + /// + /// // CREATE TABLE book ( + /// // id INTEGER PRIMARY KEY AUTOINCREMENT, + /// // authorId INTEGER REFERENCES person(id), + /// // translatorId INTEGER REFERENCES person(id), + /// // title TEXT + /// // ) + /// try db.create(table: "book") { t in + /// t.autoIncrementedPrimaryKey("id") + /// t.belongsTo("author", inTable: "person") + /// t.belongsTo("translator", inTable: "person") + /// t.column("title", .text) /// } /// ``` /// - /// When defining a check constraint on a single column, you can use the - /// ``ColumnDefinition/check(sql:)`` shortcut: + /// Specify foreign key actions: /// /// ```swift - /// // CREATE TABLE player( - /// // name TEXT CHECK (LENGTH(name) > 0) - /// // ) /// try db.create(table: "player") { t in - /// t.column("name", .text).check(sql: "LENGTH(name) > 0") + /// t.belongsTo("team", onDelete: .cascade) + /// t.belongsTo("captain", inTable: "player", onDelete: .setNull) /// } /// ``` /// - /// Related SQLite documentation: - /// - /// - parameter sql: An SQL snippet - public func check(sql: String) { - checkConstraints.append(SQL(sql: sql).sqlExpression) - } - - /// Appends a table constraint. - /// - /// For example: + /// The added columns are indexed by default. You can disable this + /// automatic index with the `indexed: false` option. You can also make + /// this index unique with ``ForeignKeyDefinition/unique()``: /// /// ```swift - /// // CREATE TABLE player ( - /// // score INTEGER, - /// // CHECK (score >= 0) - /// // ) /// try db.create(table: "player") { t in - /// t.column("score", .integer) - /// t.constraint(sql: "CHECK (score >= 0)") - /// } - /// ``` - public func constraint(sql: String) { - literalConstraints.append(SQL(sql: sql)) - } - - /// Appends a table constraint. 
- /// - /// ``SQL`` literals allow you to safely embed raw values in your SQL, - /// without any risk of syntax errors or SQL injection: - /// - /// ```swift - /// // CREATE TABLE player ( - /// // score INTEGER, - /// // CHECK (score >= 0) - /// // ) - /// let minScore = 0 - /// try db.create(table: "player") { t in - /// t.column("score", .integer) - /// t.constraint(literal: "CHECK (score >= \(minScore))") - /// } - /// ``` - public func constraint(literal: SQL) { - literalConstraints.append(literal) - } - - fileprivate func sql(_ db: Database) throws -> String { - var statements: [String] = [] - - do { - var chunks: [String] = [] - chunks.append("CREATE") - if options.contains(.temporary) { - chunks.append("TEMPORARY") - } - chunks.append("TABLE") - if options.contains(.ifNotExists) { - chunks.append("IF NOT EXISTS") - } - chunks.append(name.quotedDatabaseIdentifier) - - let primaryKeyColumns: [String] - if let primaryKeyConstraint { - primaryKeyColumns = primaryKeyConstraint.columns - } else if let column = columns.lazy.compactMap(\.columnDefinition).first(where: { $0.primaryKey != nil }) { - primaryKeyColumns = [column.name] - } else { - // WITHOUT ROWID optimization requires a primary key. If the - // user sets withoutRowID, but does not define a primary key, - // this is undefined behavior. - // - // We thus can use the rowId column even when the withoutRowID - // flag is set ;-) - primaryKeyColumns = [Column.rowID.name] - } - - do { - var items: [String] = [] - try items.append(contentsOf: columns.map { - try $0.sql(db, tableName: name, primaryKeyColumns: primaryKeyColumns) - }) - - if let constraint = primaryKeyConstraint { - var chunks: [String] = [] - chunks.append("PRIMARY KEY") - chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") - if let conflictResolution = constraint.conflictResolution { - chunks.append("ON CONFLICT") - chunks.append(conflictResolution.rawValue) - } - items.append(chunks.joined(separator: " ")) - } - - for constraint in uniqueKeyConstraints { - var chunks: [String] = [] - chunks.append("UNIQUE") - chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") - if let conflictResolution = constraint.conflictResolution { - chunks.append("ON CONFLICT") - chunks.append(conflictResolution.rawValue) - } - items.append(chunks.joined(separator: " ")) - } - - for constraint in foreignKeyConstraints { - var chunks: [String] = [] - chunks.append("FOREIGN KEY") - chunks.append("(\(constraint.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", ")))") - chunks.append("REFERENCES") - if let destinationColumns = constraint.destinationColumns { - chunks.append(""" - \(constraint.table.quotedDatabaseIdentifier)(\ - \(destinationColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - } else if constraint.table == name { - chunks.append(""" - \(constraint.table.quotedDatabaseIdentifier)(\ - \(primaryKeyColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - } else { - let primaryKey = try db.primaryKey(constraint.table) - chunks.append(""" - \(constraint.table.quotedDatabaseIdentifier)(\ - \(primaryKey.columns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - } - if let deleteAction = constraint.deleteAction { - chunks.append("ON DELETE") - chunks.append(deleteAction.rawValue) - } - if let updateAction = constraint.updateAction { - chunks.append("ON UPDATE") - chunks.append(updateAction.rawValue) - } - if 
constraint.deferred { - chunks.append("DEFERRABLE INITIALLY DEFERRED") - } - items.append(chunks.joined(separator: " ")) - } - - for checkExpression in checkConstraints { - var chunks: [String] = [] - try chunks.append("CHECK (\(checkExpression.quotedSQL(db)))") - items.append(chunks.joined(separator: " ")) - } - - for literal in literalConstraints { - let context = SQLGenerationContext(db, argumentsSink: .forRawSQL) - try items.append(literal.sql(context)) - } - - chunks.append("(\(items.joined(separator: ", ")))") - } - - var tableOptions: [String] = [] - -#if GRDBCUSTOMSQLITE || GRDBCIPHER - if options.contains(.strict) { - tableOptions.append("STRICT") - } -#else - if #available(iOS 15.4, macOS 12.4, tvOS 15.4, watchOS 8.5, *) { - if options.contains(.strict) { - tableOptions.append("STRICT") - } - } -#endif - if options.contains(.withoutRowID) { - tableOptions.append("WITHOUT ROWID") - } - - if !tableOptions.isEmpty { - chunks.append(tableOptions.joined(separator: ", ")) - } - - statements.append(chunks.joined(separator: " ")) - } - - var indexOptions: IndexOptions = [] - if options.contains(.ifNotExists) { indexOptions.insert(.ifNotExists) } - let indexStatements = try columns - .compactMap { $0.columnDefinition?.indexDefinition(in: name, options: indexOptions) } - .map { try $0.sql(db) } - statements.append(contentsOf: indexStatements) - return statements.joined(separator: "; ") - } -} - -/// A `TableDefinition` lets you modify the components of a database table. -/// -/// You don't create instances of this class. Instead, you use the `Database` -/// ``Database/alter(table:body:)`` method: -/// -/// ```swift -/// try db.alter(table: "player") { t in // t is TableAlteration -/// t.add(column: "bonus", .integer) -/// } -/// ``` -/// -/// Related SQLite documentation: -public final class TableAlteration { - private let name: String - - private enum TableAlterationKind { - case add(ColumnDefinition) - case addColumnLiteral(SQL) - case rename(old: String, new: String) - case drop(String) - } - - private var alterations: [TableAlterationKind] = [] - - init(name: String) { - self.name = name - } - - /// Appends a column. - /// - /// For example: - /// - /// ```swift - /// // ALTER TABLE player ADD COLUMN bonus integer - /// try db.alter(table: "player") { t in - /// t.add(column: "bonus", .integer) - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameter name: the column name. - /// - parameter type: the column type. - /// - returns: An ColumnDefinition that allows you to refine the - /// column definition. - @discardableResult - public func add(column name: String, _ type: Database.ColumnType? = nil) -> ColumnDefinition { - let column = ColumnDefinition(name: name, type: type) - alterations.append(.add(column)) - return column - } - - /// Appends a column. - /// - /// For example: - /// - /// ```swift - /// // ALTER TABLE player ADD COLUMN bonus integer - /// try db.alter(table: "player") { t in - /// t.addColumn(sql: "bonus integer") - /// } - /// ``` - public func addColumn(sql: String) { - alterations.append(.addColumnLiteral(SQL(sql: sql))) - } - - /// Appends a column. 
- /// - /// ``SQL`` literals allow you to safely embed raw values in your SQL, - /// without any risk of syntax errors or SQL injection: - /// - /// ```swift - /// // ALTER TABLE player ADD COLUMN name TEXT DEFAULT 'Anonymous' - /// try db.alter(table: "player") { t in - /// t.addColumn(literal: "name TEXT DEFAULT \(defaultName)") - /// } - /// ``` - public func addColumn(literal: SQL) { - alterations.append(.addColumnLiteral(literal)) - } - - #if GRDBCUSTOMSQLITE || GRDBCIPHER - /// Renames a column. - /// - /// For example: - /// - /// ```swift - /// try db.alter(table: "player") { t in - /// t.rename(column: "url", to: "homeURL") - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameter name: the old name of the column. - /// - parameter newName: the new name of the column. - public func rename(column name: String, to newName: String) { - _rename(column: name, to: newName) - } - - /// Drops a column. - /// - /// For example: - /// - /// ```swift - /// try db.alter(table: "player") { t in - /// t.drop(column: "age") - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - Parameter name: the name of the column to drop. - public func drop(column name: String) { - _drop(column: name) - } - #else - /// Renames a column. - /// - /// For example: + /// // teamId is not indexed + /// t.belongsTo("team", indexed: false) /// - /// ```swift - /// try db.alter(table: "player") { t in - /// t.rename(column: "url", to: "homeURL") + /// // One single player per country + /// t.belongsTo("country").unique() /// } /// ``` /// - /// Related SQLite documentation: - /// - /// - parameter name: the old name of the column. - /// - parameter newName: the new name of the column. - @available(iOS 13.0, tvOS 13.0, watchOS 6.0, *) - public func rename(column name: String, to newName: String) { - _rename(column: name, to: newName) - } - - /// Drops a column. - /// + /// For more precision in the definition of foreign keys, use instead + /// ``ColumnDefinition/references(_:column:onDelete:onUpdate:deferred:)`` + /// or ``TableDefinition/foreignKey(_:references:columns:onDelete:onUpdate:deferred:)``. /// For example: /// /// ```swift - /// try db.alter(table: "player") { t in - /// t.drop(column: "age") - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - Parameter name: the name of the column to drop. 
- @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ - public func drop(column name: String) { - _drop(column: name) - } - #endif - - private func _rename(column name: String, to newName: String) { - alterations.append(.rename(old: name, new: newName)) - } - - private func _drop(column name: String) { - alterations.append(.drop(name)) - } - - fileprivate func sql(_ db: Database) throws -> String { - var statements: [String] = [] - - for alteration in alterations { - switch alteration { - case let .add(column): - var chunks: [String] = [] - chunks.append("ALTER TABLE") - chunks.append(name.quotedDatabaseIdentifier) - chunks.append("ADD COLUMN") - try chunks.append(column.sql(db, tableName: name, primaryKeyColumns: nil)) - let statement = chunks.joined(separator: " ") - statements.append(statement) - - if let indexDefinition = column.indexDefinition(in: name) { - try statements.append(indexDefinition.sql(db)) - } - - case let .addColumnLiteral(sqlLiteral): - var chunks: [String] = [] - chunks.append("ALTER TABLE") - chunks.append(name.quotedDatabaseIdentifier) - chunks.append("ADD COLUMN") - let context = SQLGenerationContext(db, argumentsSink: .forRawSQL) - try chunks.append(sqlLiteral.sql(context)) - let statement = chunks.joined(separator: " ") - statements.append(statement) - - case let .rename(oldName, newName): - var chunks: [String] = [] - chunks.append("ALTER TABLE") - chunks.append(name.quotedDatabaseIdentifier) - chunks.append("RENAME COLUMN") - chunks.append(oldName.quotedDatabaseIdentifier) - chunks.append("TO") - chunks.append(newName.quotedDatabaseIdentifier) - let statement = chunks.joined(separator: " ") - statements.append(statement) - - case let .drop(column): - var chunks: [String] = [] - chunks.append("ALTER TABLE") - chunks.append(name.quotedDatabaseIdentifier) - chunks.append("DROP COLUMN") - chunks.append(column.quotedDatabaseIdentifier) - let statement = chunks.joined(separator: " ") - statements.append(statement) - } - } - - return statements.joined(separator: "; ") - } -} - -/// Describes a database column. -/// -/// You get instances of `ColumnDefinition` when you create or alter a database -/// tables. For example: -/// -/// ```swift -/// try db.create(table: "player") { t in -/// t.column("name", .text) // ColumnDefinition -/// } -/// -/// try db.alter(table: "player") { t in -/// t.add(column: "score", .integer) // ColumnDefinition -/// } -/// ``` -/// -/// See ``TableDefinition/column(_:_:)`` and ``TableAlteration/add(column:_:)``. -/// -/// Related SQLite documentation: -/// -/// - -/// - -/// -/// ## Topics -/// -/// ### Foreign Keys -/// -/// - ``references(_:column:onDelete:onUpdate:deferred:)`` -/// -/// ### Indexes -/// -/// - ``indexed()`` -/// - ``unique(onConflict:)`` -/// -/// ### Default value -/// -/// - ``defaults(to:)`` -/// - ``defaults(sql:)`` -/// -/// ### Collations -/// -/// - ``collate(_:)-4dljx`` -/// - ``collate(_:)-9ywza`` -/// -/// ### Generated Columns -/// -/// - ``generatedAs(_:_:)`` -/// - ``generatedAs(sql:_:)`` -/// - ``GeneratedColumnQualification`` -/// -/// ### Other Constraints -/// -/// - ``check(_:)`` -/// - ``check(sql:)`` -/// - ``notNull(onConflict:)`` -/// -/// ### Sunsetted Methods -/// -/// Those are legacy interfaces that are preserved for backwards compatibility. -/// Their use is not recommended. 
-/// -/// - ``primaryKey(onConflict:autoincrement:)`` -public final class ColumnDefinition { - enum Index { - case none - case index - case unique(Database.ConflictResolution) - } - - private struct ForeignKeyConstraint { - var table: String - var column: String? - var deleteAction: Database.ForeignKeyAction? - var updateAction: Database.ForeignKeyAction? - var deferred: Bool - } - - /// The kind of a generated column. - /// - /// Related SQLite documentation: - public enum GeneratedColumnQualification { - /// A `VIRTUAL` generated column. - case virtual - /// A `STORED` generated column. - case stored - } - - private struct GeneratedColumnConstraint { - var expression: SQLExpression - var qualification: GeneratedColumnQualification - } - - fileprivate let name: String - private let type: Database.ColumnType? - fileprivate var primaryKey: (conflictResolution: Database.ConflictResolution?, autoincrement: Bool)? - private var index: Index = .none - private var notNullConflictResolution: Database.ConflictResolution? - private var checkConstraints: [SQLExpression] = [] - private var foreignKeyConstraints: [ForeignKeyConstraint] = [] - private var defaultExpression: SQLExpression? - private var collationName: String? - private var generatedColumnConstraint: GeneratedColumnConstraint? - - init(name: String, type: Database.ColumnType?) { - self.name = name - self.type = type - } - - /// Adds a primary key constraint. + /// try db.create(table: "player") { t in + /// // This convenience method... + /// t.belongsTo("team") /// - /// For example: + /// // ... is equivalent to: + /// t.column("teamId", .integer) + /// .references("team") + /// .indexed() /// - /// ```swift - /// // CREATE TABLE player( - /// // id TEXT NOT NULL PRIMARY KEY - /// // ) - /// try db.create(table: "player") { t in - /// t.primaryKey("id", .text) + /// // ... and is equivalent to: + /// t.column("teamId", .integer).indexed() + /// t.foreignKey(["teamId"], references: "team") /// } /// ``` /// - /// - important: Make sure you add a not null constraint on your primary key - /// column, as in the above example, or SQLite will allow null values. - /// See - /// for more information. - /// - /// - warning: This is a legacy interface that is preserved for backwards - /// compatibility. Use of this interface is not recommended: prefer - /// ``TableDefinition/primaryKey(_:_:onConflict:)`` - /// instead. + /// See [Associations](https://github.com/groue/GRDB.swift/blob/master/Documentation/AssociationsBasics.md) + /// for more information about foreign keys and associations. /// /// - parameters: - /// - conflictResolution: An optional ``Database/ConflictResolution``. - /// - autoincrement: If true, the primary key is autoincremented. - /// - returns: `self` so that you can further refine the column definition. + /// - name: The name of the foreign key, used as a prefix for the + /// added columns. + /// - table: The referenced table. If nil, the referenced table is + /// designated by the `name` parameter. + /// - deleteAction: Optional action when the referenced row + /// is deleted. + /// - updateAction: Optional action when the referenced row + /// is updated. + /// - isDeferred: A boolean value indicating whether the foreign key + /// constraint is deferred. + /// See . + /// - indexed: A boolean value indicating whether the foreign key is + /// indexed. It is true by default. + /// - returns: A ``ForeignKeyDefinition`` that allows you to refine the + /// foreign key. 
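+    ///
+    /// A hypothetical sketch of the composite primary key case, following
+    /// the naming rule described above (the `passport` and `visit` tables,
+    /// and their columns, are illustrative):
+    ///
+    /// ```swift
+    /// try db.create(table: "passport") { t in
+    ///     t.primaryKey {
+    ///         t.column("countryCode", .text)
+    ///         t.column("citizenId", .integer)
+    ///     }
+    /// }
+    ///
+    /// // CREATE TABLE visit (
+    /// //   passportCountryCode TEXT,
+    /// //   passportCitizenId INTEGER,
+    /// //   FOREIGN KEY (passportCountryCode, passportCitizenId)
+    /// //     REFERENCES passport(countryCode, citizenId)
+    /// // )
+    /// try db.create(table: "visit") { t in
+    ///     t.belongsTo("passport")
+    /// }
+    /// ```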
@discardableResult - public func primaryKey( - onConflict conflictResolution: Database.ConflictResolution? = nil, - autoincrement: Bool = false) - -> Self + public func belongsTo( + _ name: String, + inTable table: String? = nil, + onDelete deleteAction: Database.ForeignKeyAction? = nil, + onUpdate updateAction: Database.ForeignKeyAction? = nil, + deferred isDeferred: Bool = false, + indexed: Bool = true) + -> ForeignKeyDefinition { - primaryKey = (conflictResolution: conflictResolution, autoincrement: autoincrement) - return self - } - - /// Adds a not null constraint. - /// - /// For example: - /// - /// ```swift - /// // CREATE TABLE player( - /// // name TEXT NOT NULL - /// // ) - /// try db.create(table: "player") { t in - /// t.column("name", .text).notNull() - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameter conflictResolution: An optional ``Database/ConflictResolution``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func notNull(onConflict conflictResolution: Database.ConflictResolution? = nil) -> Self { - notNullConflictResolution = conflictResolution ?? .abort - return self - } - - /// Adds a unique constraint. - /// - /// For example: - /// - /// ```swift - /// // CREATE TABLE player( - /// // email TEXT UNIQUE - /// // ) - /// try db.create(table: "player") { t in - /// t.column("email", .text).unique() - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameter conflictResolution: An optional ``Database/ConflictResolution``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func unique(onConflict conflictResolution: Database.ConflictResolution? = nil) -> Self { - index = .unique(conflictResolution ?? .abort) - return self - } - - /// Adds an index. - /// - /// For example: - /// - /// ```swift - /// // CREATE TABLE player(email TEXT); - /// // CREATE INDEX player_on_email ON player(email); - /// try db.create(table: "player") { t in - /// t.column("email", .text).indexed() - /// } - /// ``` - /// - /// The name of the created index is `
_on_`, where `table` - /// and `column` are the names of the table and the column. See the - /// example above. - /// - /// See also ``unique(onConflict:)``. - /// - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func indexed() -> Self { - if case .none = index { - self.index = .index + let foreignKey = ForeignKeyDefinition( + name: name, + table: table, + deleteAction: deleteAction, + updateAction: updateAction, + isIndexed: indexed && !inPrimaryKeyBody, + isDeferred: isDeferred) + columnComponents.append(.foreignKeyDefinition(foreignKey)) + + if inPrimaryKeyBody { + // Add a not null constraint in order to fix an SQLite bug: + // + foreignKey.notNull() + primaryKeyConstraint!.components.append(.foreignKeyDefinition(foreignKey)) } - return self + + return foreignKey } /// Adds a check constraint. @@ -1575,498 +607,154 @@ public final class ColumnDefinition { /// For example: /// /// ```swift - /// // CREATE TABLE player( - /// // name TEXT CHECK (LENGTH(name) > 0) + /// // CREATE TABLE player ( + /// // personalPhone TEXT, + /// // workPhone TEXT, + /// // CHECK personalPhone IS NOT NULL OR workPhone IS NOT NULL /// // ) /// try db.create(table: "player") { t in - /// t.column("name", .text).check { length($0) > 0 } + /// t.column("personalPhone", .text) + /// t.column("workPhone", .text) + /// let personalPhone = Column("personalPhone") + /// let workPhone = Column("workPhone") + /// t.check(personalPhone != nil || workPhone != nil) /// } /// ``` /// - /// Related SQLite documentation: - /// - /// - parameter condition: A closure whose argument is a ``Column`` that - /// represents the defined column, and returns the expression to check. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func check(_ condition: (Column) -> any SQLExpressible) -> Self { - checkConstraints.append(condition(Column(name)).sqlExpression) - return self - } - - /// Adds a check constraint. - /// - /// For example: + /// When defining a check constraint on a single column, you can use the + /// ``ColumnDefinition/check(_:)`` shortcut: /// /// ```swift /// // CREATE TABLE player( /// // name TEXT CHECK (LENGTH(name) > 0) /// // ) /// try db.create(table: "player") { t in - /// t.column("name", .text).check(sql: "LENGTH(name) > 0") + /// t.column("name", .text).check { length($0) > 0 } /// } /// ``` /// /// Related SQLite documentation: /// - /// - parameter sql: An SQL snippet. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func check(sql: String) -> Self { - checkConstraints.append(SQL(sql: sql).sqlExpression) - return self + /// - parameter condition: The checked condition. + @available(*, deprecated) + public func check(_ condition: some SQLExpressible) { + checkConstraints.append(condition.sqlExpression) } - /// Defines the default value. + /// Adds a check constraint. 
/// /// For example: /// /// ```swift - /// // CREATE TABLE player( - /// // email TEXT DEFAULT 'Anonymous' + /// // CREATE TABLE player ( + /// // personalPhone TEXT, + /// // workPhone TEXT, + /// // CHECK personalPhone IS NOT NULL OR workPhone IS NOT NULL /// // ) /// try db.create(table: "player") { t in - /// t.column("name", .text).defaults(to: "Anonymous") + /// t.column("personalPhone", .text) + /// t.column("workPhone", .text) + /// let personalPhone = Column("personalPhone") + /// let workPhone = Column("workPhone") + /// t.check(personalPhone != nil || workPhone != nil) /// } /// ``` /// - /// Related SQLite documentation: - /// - /// - parameter value: A ``DatabaseValueConvertible`` value. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func defaults(to value: some DatabaseValueConvertible) -> Self { - defaultExpression = value.sqlExpression - return self - } - - /// Defines the default value. - /// - /// For example: + /// When defining a check constraint on a single column, you can use the + /// ``ColumnDefinition/check(_:)`` shortcut: /// /// ```swift /// // CREATE TABLE player( - /// // creationDate DATETIME DEFAULT CURRENT_TIMESTAMP + /// // name TEXT CHECK (LENGTH(name) > 0) /// // ) /// try db.create(table: "player") { t in - /// t.column("creationDate", .DateTime).defaults(sql: "CURRENT_TIMESTAMP") + /// t.column("name", .text).check { length($0) > 0 } /// } /// ``` /// - /// Related SQLite documentation: + /// Related SQLite documentation: /// - /// - parameter sql: An SQL snippet. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func defaults(sql: String) -> Self { - defaultExpression = SQL(sql: sql).sqlExpression - return self + /// - parameter condition: The checked condition. + public func check(_ condition: some SQLSpecificExpressible) { + checkConstraints.append(condition.sqlExpression) } - /// Defines the default collation. + /// Adds a check constraint. /// /// For example: /// /// ```swift - /// // CREATE TABLE player( - /// // email TEXT COLLATE NOCASE + /// // CREATE TABLE player ( + /// // personalPhone TEXT, + /// // workPhone TEXT, + /// // CHECK personalPhone IS NOT NULL OR workPhone IS NOT NULL /// // ) /// try db.create(table: "player") { t in - /// t.column("email", .text).collate(.nocase) - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameter collation: A ``Database/CollationName``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func collate(_ collation: Database.CollationName) -> Self { - collationName = collation.rawValue - return self - } - - /// Defines the default collation. - /// - /// For example: - /// - /// ```swift - /// try db.create(table: "player") { t in - /// t.column("name", .text).collate(.localizedCaseInsensitiveCompare) + /// t.column("personalPhone", .text) + /// t.column("workPhone", .text) + /// t.check(sql: "personalPhone IS NOT NULL OR workPhone IS NOT NULL") /// } /// ``` /// - /// Related SQLite documentation: - /// - /// - parameter collation: A ``DatabaseCollation``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func collate(_ collation: DatabaseCollation) -> Self { - collationName = collation.name - return self - } - -#if GRDBCUSTOMSQLITE || GRDBCIPHER - /// Defines the column as a generated column. 
- /// - /// For example: + /// When defining a check constraint on a single column, you can use the + /// ``ColumnDefinition/check(sql:)`` shortcut: /// /// ```swift /// // CREATE TABLE player( - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// // score INTEGER NOT NULL, - /// // bonus INTEGER NOT NULL, - /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // name TEXT CHECK (LENGTH(name) > 0) /// // ) /// try db.create(table: "player") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("score", .integer).notNull() - /// t.column("bonus", .integer).notNull() - /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") + /// t.column("name", .text).check(sql: "LENGTH(name) > 0") /// } /// ``` /// - /// Related SQLite documentation: + /// Related SQLite documentation: /// - /// - parameters: - /// - sql: An SQL expression. - /// - qualification: The generated column's qualification, which - /// defaults to ``GeneratedColumnQualification/virtual``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func generatedAs( - sql: String, - _ qualification: GeneratedColumnQualification = .virtual) - -> Self - { - let expression = SQL(sql: sql).sqlExpression - generatedColumnConstraint = GeneratedColumnConstraint( - expression: expression, - qualification: qualification) - return self + /// - parameter sql: An SQL snippet + public func check(sql: String) { + checkConstraints.append(SQL(sql: sql).sqlExpression) } - /// Defines the column as a generated column. - /// - /// For example: - /// - /// ```swift - /// // CREATE TABLE player( - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// // score INTEGER NOT NULL, - /// // bonus INTEGER NOT NULL, - /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL - /// // ) - /// try db.create(table: "player") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("score", .integer).notNull() - /// t.column("bonus", .integer).notNull() - /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - expression: The generated expression. - /// - qualification: The generated column's qualification, which - /// defaults to ``GeneratedColumnQualification/virtual``. - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func generatedAs( - _ expression: some SQLExpressible, - _ qualification: GeneratedColumnQualification = .virtual) - -> Self - { - generatedColumnConstraint = GeneratedColumnConstraint( - expression: expression.sqlExpression, - qualification: qualification) - return self - } - #else - /// Defines the column as a generated column. + /// Appends a table constraint. 
/// /// For example: /// /// ```swift - /// // CREATE TABLE player( - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// // score INTEGER NOT NULL, - /// // bonus INTEGER NOT NULL, - /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // CREATE TABLE player ( + /// // score INTEGER, + /// // CHECK (score >= 0) /// // ) /// try db.create(table: "player") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("score", .integer).notNull() - /// t.column("bonus", .integer).notNull() - /// t.column("totalScore", .integer).generatedAs(sql: "score + bonus") + /// t.column("score", .integer) + /// t.constraint(sql: "CHECK (score >= 0)") /// } /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - sql: An SQL expression. - /// - qualification: The generated column's qualification, which - /// defaults to ``GeneratedColumnQualification/virtual``. - /// - returns: `self` so that you can further refine the column definition. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ (3.31 actually) - @discardableResult - public func generatedAs( - sql: String, - _ qualification: GeneratedColumnQualification = .virtual) - -> Self - { - let expression = SQL(sql: sql).sqlExpression - generatedColumnConstraint = GeneratedColumnConstraint( - expression: expression, - qualification: qualification) - return self + public func constraint(sql: String) { + literalConstraints.append(SQL(sql: sql)) } - /// Defines the column as a generated column. + /// Appends a table constraint. /// - /// For example: + /// ``SQL`` literals allow you to safely embed raw values in your SQL, + /// without any risk of syntax errors or SQL injection: /// /// ```swift - /// // CREATE TABLE player( - /// // id INTEGER PRIMARY KEY AUTOINCREMENT, - /// // score INTEGER NOT NULL, - /// // bonus INTEGER NOT NULL, - /// // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) VIRTUAL + /// // CREATE TABLE player ( + /// // score INTEGER, + /// // CHECK (score >= 0) /// // ) + /// let minScore = 0 /// try db.create(table: "player") { t in - /// t.autoIncrementedPrimaryKey("id") - /// t.column("score", .integer).notNull() - /// t.column("bonus", .integer).notNull() - /// t.column("totalScore", .integer).generatedAs(Column("score") + Column("bonus")) - /// } - /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - expression: The generated expression. - /// - qualification: The generated column's qualification, which - /// defaults to ``GeneratedColumnQualification/virtual``. - /// - returns: `self` so that you can further refine the column definition. - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ (3.31 actually) - @discardableResult - public func generatedAs( - _ expression: some SQLExpressible, - _ qualification: GeneratedColumnQualification = .virtual) - -> Self - { - generatedColumnConstraint = GeneratedColumnConstraint( - expression: expression.sqlExpression, - qualification: qualification) - return self - } - #endif - - /// Adds a foreign key constraint. 
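As a complement to the `generatedAs(sql:_:)` examples above, here is a sketch of the expression-based variant with the `.stored` qualification. Per the availability annotations in this hunk, this requires a generated-columns-capable SQLite (custom builds or SQLCipher, otherwise the system SQLite behind the iOS 15 / macOS 12 gate):

```swift
try db.create(table: "player") { t in
    t.autoIncrementedPrimaryKey("id")
    t.column("score", .integer).notNull()
    t.column("bonus", .integer).notNull()
    // totalScore INTEGER GENERATED ALWAYS AS (score + bonus) STORED
    t.column("totalScore", .integer)
        .generatedAs(Column("score") + Column("bonus"), .stored)
}
```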
- /// - /// For example: - /// - /// ```swift - /// // CREATE TABLE book( - /// // authorId INTEGER REFERENCES author(id) ON DELETE CASCADE - /// // ) - /// try db.create(table: "book") { t in - /// t.column("authorId", .integer).references("author", onDelete: .cascade) + /// t.column("score", .integer) + /// t.constraint(literal: "CHECK (score >= \(minScore))") /// } /// ``` - /// - /// Related SQLite documentation: - /// - /// - parameters: - /// - table: The referenced table. - /// - column: The referenced column in the referenced table. If not - /// specified, the column of the primary key of the referenced table - /// is used. - /// - deleteAction: Optional action when the referenced row is deleted. - /// - updateAction: Optional action when the referenced row is updated. - /// - deferred: If true, defines a deferred foreign key constraint. - /// See . - /// - returns: `self` so that you can further refine the column definition. - @discardableResult - public func references( - _ table: String, - column: String? = nil, - onDelete deleteAction: Database.ForeignKeyAction? = nil, - onUpdate updateAction: Database.ForeignKeyAction? = nil, - deferred: Bool = false) -> Self - { - foreignKeyConstraints.append(ForeignKeyConstraint( - table: table, - column: column, - deleteAction: deleteAction, - updateAction: updateAction, - deferred: deferred)) - return self - } - - fileprivate func sql(_ db: Database, tableName: String, primaryKeyColumns: [String]?) throws -> String { - var chunks: [String] = [] - chunks.append(name.quotedDatabaseIdentifier) - if let type = type { - chunks.append(type.rawValue) - } - - if let (conflictResolution, autoincrement) = primaryKey { - chunks.append("PRIMARY KEY") - if let conflictResolution = conflictResolution { - chunks.append("ON CONFLICT") - chunks.append(conflictResolution.rawValue) - } - if autoincrement { - chunks.append("AUTOINCREMENT") - } - } - - switch notNullConflictResolution { - case .none: - break - case .abort?: - chunks.append("NOT NULL") - case let conflictResolution?: - chunks.append("NOT NULL ON CONFLICT") - chunks.append(conflictResolution.rawValue) - } - - switch index { - case .none: - break - case .unique(let conflictResolution): - switch conflictResolution { - case .abort: - chunks.append("UNIQUE") - default: - chunks.append("UNIQUE ON CONFLICT") - chunks.append(conflictResolution.rawValue) - } - case .index: - break - } - - for checkConstraint in checkConstraints { - try chunks.append("CHECK (\(checkConstraint.quotedSQL(db)))") - } - - if let defaultExpression = defaultExpression { - try chunks.append("DEFAULT \(defaultExpression.quotedSQL(db))") - } - - if let collationName = collationName { - chunks.append("COLLATE") - chunks.append(collationName) - } - - for constraint in foreignKeyConstraints { - chunks.append("REFERENCES") - if let column = constraint.column { - // explicit reference - chunks.append("\(constraint.table.quotedDatabaseIdentifier)(\(column.quotedDatabaseIdentifier))") - } else if constraint.table.lowercased() == tableName.lowercased() { - // implicit autoreference - let primaryKeyColumns = try primaryKeyColumns ?? 
db.primaryKey(constraint.table).columns - chunks.append(""" - \(constraint.table.quotedDatabaseIdentifier)(\ - \(primaryKeyColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - } else { - // implicit external reference - let primaryKeyColumns = try db.primaryKey(constraint.table).columns - chunks.append(""" - \(constraint.table.quotedDatabaseIdentifier)(\ - \(primaryKeyColumns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - } - if let deleteAction = constraint.deleteAction { - chunks.append("ON DELETE") - chunks.append(deleteAction.rawValue) - } - if let updateAction = constraint.updateAction { - chunks.append("ON UPDATE") - chunks.append(updateAction.rawValue) - } - if constraint.deferred { - chunks.append("DEFERRABLE INITIALLY DEFERRED") - } - } - - if let constraint = generatedColumnConstraint { - try chunks.append("GENERATED ALWAYS AS (\(constraint.expression.quotedSQL(db)))") - let qualificationLiteral: String - switch constraint.qualification { - case .stored: - qualificationLiteral = "STORED" - case .virtual: - qualificationLiteral = "VIRTUAL" - } - chunks.append(qualificationLiteral) - } - - return chunks.joined(separator: " ") - } - - fileprivate func indexDefinition(in table: String, options: IndexOptions = []) -> IndexDefinition? { - switch index { - case .none: return nil - case .unique: return nil - case .index: - return IndexDefinition( - name: "\(table)_on_\(name)", - table: table, - columns: [name], - options: options, - condition: nil) - } + public func constraint(literal: SQL) { + literalConstraints.append(literal) } } -/// Index creation options -public struct IndexOptions: OptionSet { - public let rawValue: Int - - public init(rawValue: Int) { self.rawValue = rawValue } - - /// Only creates the index if it does not already exist. - public static let ifNotExists = IndexOptions(rawValue: 1 << 0) - - /// Creates a unique index. - public static let unique = IndexOptions(rawValue: 1 << 1) -} - -private struct IndexDefinition { - let name: String - let table: String - let columns: [String] - let options: IndexOptions - let condition: SQLExpression? - - func sql(_ db: Database) throws -> String { - var chunks: [String] = [] - chunks.append("CREATE") - if options.contains(.unique) { - chunks.append("UNIQUE") - } - chunks.append("INDEX") - if options.contains(.ifNotExists) { - chunks.append("IF NOT EXISTS") - } - chunks.append(name.quotedDatabaseIdentifier) - chunks.append("ON") - chunks.append(""" - \(table.quotedDatabaseIdentifier)(\ - \(columns.map(\.quotedDatabaseIdentifier).joined(separator: ", "))\ - ) - """) - if let condition = condition { - try chunks.append("WHERE \(condition.quotedSQL(db))") - } - return chunks.joined(separator: " ") - } -} +// Explicit non-conformance to Sendable: `TableDefinition` is a mutable +// class and there is no known reason for making it thread-safe. 
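The explicit non-conformance idiom introduced at the end of this file is worth isolating. A generic sketch with a hypothetical `Counter` class, outside this diff: the unavailable conformance makes any use of the type in a `Sendable` position a hard compiler error, rather than a concurrency warning or an accidental retroactive conformance declared elsewhere.

```swift
final class Counter {
    var value = 0
}

// Unavailable conformance: Counter can never satisfy Sendable,
// and the compiler reports an error at every use site that asks for it.
@available(*, unavailable)
extension Counter: Sendable { }

func run(_ body: @escaping @Sendable () -> Void) { body() }

let counter = Counter()
// run { counter.value += 1 } // error: 'Counter' is unavailable as 'Sendable'
```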
+@available(*, unavailable) +extension TableDefinition: Sendable { } diff --git a/GRDB/QueryInterface/Schema/VirtualTableModule.swift b/GRDB/QueryInterface/Schema/VirtualTableModule.swift index 9aba553087..07bdf7bdd8 100644 --- a/GRDB/QueryInterface/Schema/VirtualTableModule.swift +++ b/GRDB/QueryInterface/Schema/VirtualTableModule.swift @@ -130,7 +130,7 @@ extension Database { // Define virtual table let configuration = VirtualTableConfiguration(ifNotExists: ifNotExists) let definition = module.makeTableDefinition(configuration: configuration) - if let body = body { + if let body { try body(definition) } diff --git a/GRDB/QueryInterface/TableRecord+Association.swift b/GRDB/QueryInterface/TableRecord+Association.swift index 766d06ffad..e86a548141 100644 --- a/GRDB/QueryInterface/TableRecord+Association.swift +++ b/GRDB/QueryInterface/TableRecord+Association.swift @@ -443,7 +443,7 @@ extension TableRecord { { let association = HasManyThroughAssociation(through: pivot, using: target) - if let key = key { + if let key { return association.forKey(key) } else { return association @@ -529,7 +529,7 @@ extension TableRecord { { let association = HasOneThroughAssociation(through: pivot, using: target) - if let key = key { + if let key { return association.forKey(key) } else { return association diff --git a/GRDB/QueryInterface/TableRecord+QueryInterfaceRequest.swift b/GRDB/QueryInterface/TableRecord+QueryInterfaceRequest.swift index 00ec063965..8044373f30 100644 --- a/GRDB/QueryInterface/TableRecord+QueryInterfaceRequest.swift +++ b/GRDB/QueryInterface/TableRecord+QueryInterfaceRequest.swift @@ -605,7 +605,7 @@ extension TableRecord { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRecord where Self: Identifiable, ID: DatabaseValueConvertible { /// Returns a request filtered by primary key. /// diff --git a/GRDB/Record/EncodableRecord+Encodable.swift b/GRDB/Record/EncodableRecord+Encodable.swift index 2d6e5dba45..c7816c6723 100644 --- a/GRDB/Record/EncodableRecord+Encodable.swift +++ b/GRDB/Record/EncodableRecord+Encodable.swift @@ -94,7 +94,7 @@ private class RecordEncoder: Encoder { // swiftlint:enable comma func encodeIfPresent(_ value: T?, forKey key: Key) throws where T: Encodable { - if let value = value { + if let value { try recordEncoder.encode(value, forKey: key) } else { recordEncoder.persist(nil, forKey: key) @@ -128,7 +128,9 @@ private class RecordEncoder: Encoder { } fileprivate func encode(_ value: T, forKey key: any CodingKey) throws where T: Encodable { - if let date = value as? Date { + if let data = value as? Data { + persist(Record.databaseDataEncodingStrategy.encode(data), forKey: key) + } else if let date = value as? Date { persist(Record.databaseDateEncodingStrategy.encode(date), forKey: key) } else if let uuid = value as? 
UUID { persist(Record.databaseUUIDEncodingStrategy.encode(uuid), forKey: key) diff --git a/GRDB/Record/EncodableRecord.swift b/GRDB/Record/EncodableRecord.swift index 336cae571e..30bf4bb483 100644 --- a/GRDB/Record/EncodableRecord.swift +++ b/GRDB/Record/EncodableRecord.swift @@ -20,11 +20,13 @@ import Foundation // For JSONEncoder /// ### Configuring Persistence for the Standard Encodable Protocol /// /// - ``databaseColumnEncodingStrategy-5sx4v`` +/// - ``databaseDataEncodingStrategy-9y0c7`` /// - ``databaseDateEncodingStrategy-2gtc1`` /// - ``databaseEncodingUserInfo-8upii`` /// - ``databaseJSONEncoder(for:)-6x62c`` /// - ``databaseUUIDEncodingStrategy-2t96q`` /// - ``DatabaseColumnEncodingStrategy`` +/// - ``DatabaseDataEncodingStrategy`` /// - ``DatabaseDateEncodingStrategy`` /// - ``DatabaseUUIDEncodingStrategy`` /// @@ -35,6 +37,7 @@ import Foundation // For JSONEncoder /// ### Comparing Records /// /// - ``databaseChanges(from:)`` +/// - ``databaseChanges(modify:)`` /// - ``databaseEquals(_:)`` public protocol EncodableRecord { /// Encodes the record into the provided persistence container. @@ -114,6 +117,24 @@ public protocol EncodableRecord { /// ``encode(to:)-1mrt`` implementation. static func databaseJSONEncoder(for column: String) -> JSONEncoder + /// The strategy for encoding `Data` columns. + /// + /// This property is dedicated to ``EncodableRecord`` types that also + /// conform to the standard `Encodable` protocol and use the default + /// ``encode(to:)-1mrt`` implementation. + /// + /// For example: + /// + /// ```swift + /// struct Player: EncodableRecord, Encodable { + /// static let databaseDataEncodingStrategy = DatabaseDataEncodingStrategy.text + /// + /// // Encoded as SQL text. Data must contain valid UTF8 bytes. + /// var jsonData: Data + /// } + /// ``` + static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { get } + /// The strategy for encoding `Date` columns. /// /// This property is dedicated to ``EncodableRecord`` types that also @@ -198,6 +219,12 @@ extension EncodableRecord { return encoder } + /// Returns the default strategy for encoding `Data` columns: + /// ``DatabaseDataEncodingStrategy/deferredToData``. + public static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { + .deferredToData + } + /// Returns the default strategy for encoding `Date` columns: /// ``DatabaseDateEncodingStrategy/deferredToDate``. public static var databaseDateEncodingStrategy: DatabaseDateEncodingStrategy { @@ -264,6 +291,37 @@ extension EncodableRecord { let changes = try PersistenceContainer(self).changesIterator(from: PersistenceContainer(record)) return Dictionary(uniqueKeysWithValues: changes) } + + /// Modifies the record according to the provided `modify` closure, and + /// returns a dictionary of changed values. + /// + /// The keys of the dictionary are the changed column names. Values are + /// the database values from the initial version of the record. + /// + /// For example: + /// + /// ```swift + /// var player = Player(id: 1, score: 1000, hasAward: false) + /// let changes = try player.databaseChanges { + /// $0.score = 1000 + /// $0.hasAward = true + /// } + /// + /// player.hasAward // true (changed) + /// + /// changes["score"] // nil (not changed) + /// changes["hasAward"] // false (old value) + /// ``` + /// + /// - parameter modify: A closure that modifies the record.
+ public mutating func databaseChanges(modify: (inout Self) throws -> Void) + throws -> [String: DatabaseValue] + { + let container = try PersistenceContainer(self) + try modify(&self) + let changes = try PersistenceContainer(self).changesIterator(from: container) + return Dictionary(uniqueKeysWithValues: changes) + } } // MARK: - PersistenceContainer @@ -391,6 +449,48 @@ extension Row { } } +// MARK: - DatabaseDataEncodingStrategy + +/// `DatabaseDataEncodingStrategy` specifies how `EncodableRecord` types that +/// also adopt the standard `Encodable` protocol encode their `Data` properties +/// in the default +/// implementation. +/// +/// For example: +/// +/// ```swift +/// struct Player: EncodableRecord, Encodable { +/// static let databaseDataEncodingStrategy = DatabaseDataEncodingStrategy.text +/// +/// // Encoded as SQL text. Data must contain valid UTF8 bytes. +/// var jsonData: Data +/// } +/// ``` +public enum DatabaseDataEncodingStrategy { + /// Encodes `Data` columns as SQL blob. + case deferredToData + + /// Encodes `Data` columns as SQL text. Data must contain valid UTF8 bytes. + case text + + /// Encodes `Data` column as the result of the user-provided function. + case custom((Data) -> (any DatabaseValueConvertible)?) + + func encode(_ data: Data) -> DatabaseValue { + switch self { + case .deferredToData: + return data.databaseValue + case .text: + guard let string = String(data: data, encoding: .utf8) else { + fatalError("Invalid UTF8 data") + } + return string.databaseValue + case .custom(let format): + return format(data)?.databaseValue ?? .null + } + } +} + // MARK: - DatabaseDateEncodingStrategy /// `DatabaseDateEncodingStrategy` specifies how `EncodableRecord` types that diff --git a/GRDB/Record/FetchableRecord+Decodable.swift b/GRDB/Record/FetchableRecord+Decodable.swift index d74a49c05b..0b4a910eba 100644 --- a/GRDB/Record/FetchableRecord+Decodable.swift +++ b/GRDB/Record/FetchableRecord+Decodable.swift @@ -3,16 +3,73 @@ import Foundation extension FetchableRecord where Self: Decodable { /// Creates a record from `row`, using the `Decodable` conformance. public init(row: Row) throws { - self = try RowDecoder().decode(from: row) + self = try FetchableRecordDecoder().decode(Self.self, from: row) } } -// For testability. Not intended to become public as long as FetchableRecord has -// a non-throwing row initializer, since this would open an undesired door. -class RowDecoder { - init() { } +// TODO GRDB7: make it a final class, and Sendable. +/// An object that decodes fetchable records from database rows. +/// +/// The example below shows how to decode an instance of a simple `Player` +/// type, that conforms to both ``FetchableRecord`` and `Decodable`, from a +/// database row. +/// +/// ```swift +/// struct Player: FetchableRecord, Decodable { +/// var id: Int64 +/// var name: String +/// var score: Int +/// } +/// +/// try dbQueue.read { db in +/// if let row = try Row.fetchOne(db, sql: "SELECT * FROM player WHERE id = 42") { +/// let decoder = FetchableRecordDecoder() +/// let player = try decoder.decode(Player.self, from: row) +/// print(player.name) +/// } +/// } +/// ``` +/// +/// You will generally not need to create an instance of +/// `FetchableRecordDecoder`. 
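The new `DatabaseDataEncodingStrategy` defined above also has a `custom` case that the inline documentation does not illustrate. A sketch with a hypothetical `Attachment` record that stores its payload as base64 text; any `DatabaseValueConvertible` return value works, and returning nil stores NULL:

```swift
struct Attachment: EncodableRecord, Encodable {
    static let databaseDataEncodingStrategy = DatabaseDataEncodingStrategy.custom { data in
        // String is DatabaseValueConvertible: the payload is stored as SQL text.
        data.base64EncodedString()
    }

    var payload: Data
}
```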
The above sample code is correct, but you will +/// generally write instead: +/// +/// ```swift +/// try dbQueue.read { db in +/// // Prefer the init(row:) initializer: +/// if let row = try Row.fetchOne(db, sql: "SELECT * FROM player WHERE id = 42") { +/// let player = try Player(row: row) +/// print(player.name) +/// } +/// +/// // OR just directly fetch a player: +/// if let player = try Player.fetchOne(db, sql: "SELECT * FROM player WHERE id = 42") { +/// print(player.name) +/// } +/// } +/// ``` +/// +/// The behavior of the decoder depends on the decoded type. See: +/// +/// - ``FetchableRecord/databaseColumnDecodingStrategy-6uefz`` +/// - ``FetchableRecord/databaseDataDecodingStrategy-71bh1`` +/// - ``FetchableRecord/databaseDateDecodingStrategy-78y03`` +/// - ``FetchableRecord/databaseDecodingUserInfo-77jim`` +/// - ``FetchableRecord/databaseJSONDecoder(for:)-7lmxd`` +public class FetchableRecordDecoder { + /// Creates a decoder for fetchable records. + public init() { } - func decode(_ type: T.Type = T.self, from row: Row) throws -> T { + /// Returns a record of the type you specify, decoded from a + /// database row. + /// + /// - Parameters: + /// - type: The type of the record to decode from the supplied + /// database row. + /// - row: The database row to decode. + /// - Returns: An instance of the specified record type, if the decoder + /// can parse the database row. + public func decode(_ type: T.Type, from row: Row) throws -> T { let decoder = _RowDecoder(row: row, codingPath: [], columnDecodingStrategy: T.databaseColumnDecodingStrategy) return try T(from: decoder) } @@ -90,6 +147,7 @@ private struct _RowDecoder: Decoder { lazy var allKeys: [Key] = { let row = decoder.row + // TODO: test when _columnForKey is not nil var keys = _columnForKey.map { Set($0.keys) } ?? Set(row.columnNames) keys.formUnion(row.scopesTree.names) keys.formUnion(row.prefetchedRows.keys) @@ -98,7 +156,7 @@ private struct _RowDecoder: Decoder { func contains(_ key: Key) -> Bool { let row = decoder.row - if let _columnForKey = _columnForKey { + if let _columnForKey { if let column = _columnForKey[key.stringValue] { assert(row.hasColumn(column)) return true @@ -117,15 +175,25 @@ private struct _RowDecoder: Decoder { func decodeNil(forKey key: Key) throws -> Bool { let row = decoder.row - if let column = try? decodeColumn(forKey: key), row[column] != nil { - return false + + // Column? + if let column = try? decodeColumn(forKey: key), + let index = row.index(forColumn: column) + { + return row.hasNull(atIndex: index) } - if row.scopesTree[key.stringValue] != nil { - return false + + // Scope? + if let scopedRow = row.scopesTree[key.stringValue] { + return scopedRow.containsNonNullValue == false } + + // Prefetched Rows? if row.prefetchedRows[key.stringValue] != nil { return false } + + // Unknown key return true } @@ -203,7 +271,11 @@ private struct _RowDecoder: Decoder { { // Prefer DatabaseValueConvertible decoding over Decodable. // This allows decoding Date from String, or DatabaseValue from NULL. - if type == Date.self { + if type == Data.self { + return try R.databaseDataDecodingStrategy.decodeIfPresent( + fromRow: row, + atUncheckedIndex: index) as! T? + } else if type == Date.self { return try R.databaseDateDecodingStrategy.decodeIfPresent( fromRow: row, atUncheckedIndex: index) as! T? @@ -248,7 +320,9 @@ private struct _RowDecoder: Decoder { { // Prefer DatabaseValueConvertible decoding over Decodable. // This allows decoding Date from String, or DatabaseValue from NULL. 
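The reworked `decodeNil(forKey:)` above first looks for a column, then asks a scoped row whether it contains any non-NULL value. In user-facing terms, this is what lets an optional associated record decode as `nil` when a left join matched nothing. A hedged sketch under the usual association setup, with all names illustrative and a `db` handle assumed:

```swift
struct Author: TableRecord, FetchableRecord, Decodable {
    var id: Int64
    var name: String
}

struct Book: TableRecord, FetchableRecord, Decodable {
    static let author = belongsTo(Author.self)
    var title: String
}

struct BookInfo: FetchableRecord, Decodable {
    var book: Book
    var author: Author? // nil when the "author" scope contains only NULLs
}

let infos = try BookInfo.fetchAll(db, Book.including(optional: Book.author))
```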
- if type == Date.self { + if type == Data.self { + return try R.databaseDataDecodingStrategy.decode(fromRow: row, atUncheckedIndex: index) as! T + } else if type == Date.self { return try R.databaseDateDecodingStrategy.decode(fromRow: row, atUncheckedIndex: index) as! T } else if let type = T.self as? any (DatabaseValueConvertible & StatementColumnConvertible).Type { return try type.fastDecode(fromRow: row, atUncheckedIndex: index) as! T @@ -299,7 +373,7 @@ private struct _RowDecoder: Decoder { // "book", which is not the name of a column, and not the name of a // scope) has to be decoded right from the base row. But this can // happen only once. - if let decodedRootKey = decodedRootKey { + if let decodedRootKey { let keys = [decodedRootKey.stringValue, key.stringValue].sorted() throw DecodingError.keyNotFound(key, DecodingError.Context( codingPath: codingPath, @@ -313,7 +387,35 @@ private struct _RowDecoder: Decoder { func nestedContainer(keyedBy type: NestedKey.Type, forKey key: Key) throws -> KeyedDecodingContainer where NestedKey: CodingKey { - fatalError("not implemented") + let row = decoder.row + + // Column? + if let column = try? decodeColumn(forKey: key), + row.index(forColumn: column) != nil + { + // We need a JSON container, but how do we create one? + throw DecodingError.typeMismatch( + KeyedDecodingContainer.self, + DecodingError.Context( + codingPath: codingPath, + debugDescription: "not implemented: building a nested JSON container for the column '\(column)'")) + } + + // Scope? + if let scopedRow = row.scopesTree[key.stringValue] { + return KeyedDecodingContainer(KeyedContainer(decoder: _RowDecoder( + row: scopedRow, + codingPath: codingPath + [key], + columnDecodingStrategy: decoder.columnDecodingStrategy))) + } + + // Don't look for prefetched rows: those need a unkeyed container. + + throw DecodingError.typeMismatch( + KeyedDecodingContainer.self, + DecodingError.Context( + codingPath: codingPath, + debugDescription: "No keyed container found for key '\(key)'")) } func nestedUnkeyedContainer(forKey key: Key) throws -> UnkeyedDecodingContainer { @@ -369,7 +471,7 @@ private struct _RowDecoder: Decoder { } catch is JSONRequiredError { // Decode from JSON return try row.withUnsafeData(atIndex: index) { data in - guard let data = data else { + guard let data else { throw DecodingError.valueNotFound(Data.self, DecodingError.Context( codingPath: codingPath + [key], debugDescription: "Missing Data")) @@ -491,9 +593,10 @@ extension ColumnDecoder: SingleValueDecodingContainer { func decode(_ type: String.Type) throws -> String { try row.decode(atIndex: columnIndex) } func decode(_ type: T.Type) throws -> T where T: Decodable { - // Prefer DatabaseValueConvertible decoding over Decodable. - // This allows decoding Date from String, or DatabaseValue from NULL. - if type == Date.self { + // TODO: not tested + if type == Data.self { + return try R.databaseDataDecodingStrategy.decode(fromRow: row, atUncheckedIndex: columnIndex) as! T + } else if type == Date.self { return try R.databaseDateDecodingStrategy.decode(fromRow: row, atUncheckedIndex: columnIndex) as! T } else if let type = T.self as? any (DatabaseValueConvertible & StatementColumnConvertible).Type { return try type.fastDecode(fromRow: row, atUncheckedIndex: columnIndex) as! T @@ -511,6 +614,115 @@ private let iso8601Formatter: ISO8601DateFormatter = { return formatter }() +extension DatabaseDataDecodingStrategy { + fileprivate func decodeIfPresent(fromRow row: Row, atUncheckedIndex index: Int) throws -> Data? 
{ + if let sqliteStatement = row.sqliteStatement { + return try decodeIfPresent( + fromStatement: sqliteStatement, + atUncheckedIndex: CInt(index), + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } else { + return try decodeIfPresent( + fromDatabaseValue: row[index], + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } + } + + fileprivate func decode(fromRow row: Row, atUncheckedIndex index: Int) throws -> Data { + if let sqliteStatement = row.sqliteStatement { + let statementIndex = CInt(index) + + if sqlite3_column_type(sqliteStatement, statementIndex) == SQLITE_NULL { + throw RowDecodingError.valueMismatch( + Data.self, + sqliteStatement: sqliteStatement, + index: statementIndex, + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } + + return try decode( + fromStatement: sqliteStatement, + atUncheckedIndex: statementIndex, + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } else { + return try decode( + fromDatabaseValue: row[index], + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } + } + + /// - precondition: value is not NULL + fileprivate func decode( + fromStatement sqliteStatement: SQLiteStatement, + atUncheckedIndex index: CInt, + context: @autoclosure () -> RowDecodingContext) + throws -> Data + { + assert(sqlite3_column_type(sqliteStatement, index) != SQLITE_NULL, "unexpected NULL value") + switch self { + case .deferredToData: + return Data(sqliteStatement: sqliteStatement, index: index) + case .custom(let format): + let dbValue = DatabaseValue(sqliteStatement: sqliteStatement, index: index) + guard let data = format(dbValue) else { + throw RowDecodingError.valueMismatch( + Data.self, + context: context(), + databaseValue: DatabaseValue(sqliteStatement: sqliteStatement, index: index)) + } + return data + } + } + + fileprivate func decodeIfPresent( + fromStatement sqliteStatement: SQLiteStatement, + atUncheckedIndex index: CInt, + context: @autoclosure () -> RowDecodingContext) + throws -> Data? + { + if sqlite3_column_type(sqliteStatement, index) == SQLITE_NULL { + return nil + } + return try decode(fromStatement: sqliteStatement, atUncheckedIndex: index, context: context()) + } + + fileprivate func decode( + fromDatabaseValue dbValue: DatabaseValue, + context: @autoclosure () -> RowDecodingContext) + throws -> Data + { + if let data = dataFromDatabaseValue(dbValue) { + return data + } else { + throw RowDecodingError.valueMismatch(Data.self, context: context(), databaseValue: dbValue) + } + } + + fileprivate func decodeIfPresent( + fromDatabaseValue dbValue: DatabaseValue, + context: @autoclosure () -> RowDecodingContext) + throws -> Data? + { + if dbValue.isNull { + return nil + } else if let data = dataFromDatabaseValue(dbValue) { + return data + } else { + throw RowDecodingError.valueMismatch(Data.self, context: context(), databaseValue: dbValue) + } + } + + // Returns nil if decoding fails + private func dataFromDatabaseValue(_ dbValue: DatabaseValue) -> Data? { + switch self { + case .deferredToData: + return Data.fromDatabaseValue(dbValue) + case .custom(let format): + return format(dbValue) + } + } +} + extension DatabaseDateDecodingStrategy { fileprivate func decodeIfPresent(fromRow row: Row, atUncheckedIndex index: Int) throws -> Date? 
{ if let sqliteStatement = row.sqliteStatement { @@ -527,6 +739,16 @@ extension DatabaseDateDecodingStrategy { fileprivate func decode(fromRow row: Row, atUncheckedIndex index: Int) throws -> Date { if let sqliteStatement = row.sqliteStatement { + let statementIndex = CInt(index) + + if sqlite3_column_type(sqliteStatement, statementIndex) == SQLITE_NULL { + throw RowDecodingError.valueMismatch( + Date.self, + sqliteStatement: sqliteStatement, + index: statementIndex, + context: RowDecodingContext(row: row, key: .columnIndex(index))) + } + return try decode( fromStatement: sqliteStatement, atUncheckedIndex: CInt(index), diff --git a/GRDB/Record/FetchableRecord+TableRecord.swift b/GRDB/Record/FetchableRecord+TableRecord.swift index bce826b5b6..97178adebe 100644 --- a/GRDB/Record/FetchableRecord+TableRecord.swift +++ b/GRDB/Record/FetchableRecord+TableRecord.swift @@ -217,7 +217,7 @@ extension FetchableRecord where Self: TableRecord { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension FetchableRecord where Self: TableRecord & Identifiable, ID: DatabaseValueConvertible { // MARK: Fetching by Single-Column Primary Key @@ -359,7 +359,7 @@ extension FetchableRecord where Self: TableRecord & Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension FetchableRecord where Self: TableRecord & Hashable & Identifiable, ID: DatabaseValueConvertible { /// Returns a set of records identified by their primary keys. /// diff --git a/GRDB/Record/FetchableRecord.swift b/GRDB/Record/FetchableRecord.swift index 1def13b415..609279560f 100644 --- a/GRDB/Record/FetchableRecord.swift +++ b/GRDB/Record/FetchableRecord.swift @@ -89,15 +89,18 @@ import Foundation /// ### Configuring Row Decoding for the Standard Decodable Protocol /// /// - ``databaseColumnDecodingStrategy-6uefz`` +/// - ``databaseDataDecodingStrategy-71bh1`` /// - ``databaseDateDecodingStrategy-78y03`` /// - ``databaseDecodingUserInfo-77jim`` /// - ``databaseJSONDecoder(for:)-7lmxd`` /// - ``DatabaseColumnDecodingStrategy`` +/// - ``DatabaseDataDecodingStrategy`` /// - ``DatabaseDateDecodingStrategy`` /// /// ### Supporting Types /// /// - ``RecordCursor`` +/// - ``FetchableRecordDecoder`` public protocol FetchableRecord { // MARK: - Row Decoding @@ -152,6 +155,29 @@ public protocol FetchableRecord { /// ``init(row:)-4ptlh`` implementation. static func databaseJSONDecoder(for column: String) -> JSONDecoder + /// The strategy for decoding `Data` columns. + /// + /// This property is dedicated to ``FetchableRecord`` types that also + /// conform to the standard `Decodable` protocol and use the default + /// ``init(row:)-4ptlh`` implementation. + /// + /// For example: + /// + /// ```swift + /// struct Player: FetchableRecord, Decodable { + /// static let databaseDataDecodingStrategy = DatabaseDataDecodingStrategy.custom { dbValue in + /// guard let base64Data = Data.fromDatabaseValue(dbValue) else { + /// return nil + /// } + /// return Data(base64Encoded: base64Data) + /// } + /// + /// // Decoded from both database base64 strings and blobs + /// var myData: Data + /// } + /// ``` + static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { get } + /// The strategy for decoding `Date` columns.
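Since the decoding strategy mirrors `DatabaseDataEncodingStrategy` on the persistence side, a custom storage format can round-trip. A sketch pairing the two for a hypothetical record that stores `Data` as base64 text:

```swift
struct Attachment: Codable, FetchableRecord, PersistableRecord {
    static let databaseDataEncodingStrategy = DatabaseDataEncodingStrategy.custom { data in
        data.base64EncodedString()
    }

    static let databaseDataDecodingStrategy = DatabaseDataDecodingStrategy.custom { dbValue in
        // Returning nil reports a decoding error instead of producing garbage.
        String.fromDatabaseValue(dbValue).flatMap { Data(base64Encoded: $0) }
    }

    var payload: Data
}
```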
/// /// This property is dedicated to ``FetchableRecord`` types that also @@ -215,6 +241,12 @@ extension FetchableRecord { return decoder } + /// The default strategy for decoding `Data` columns is + /// ``DatabaseDataDecodingStrategy/deferredToData``. + public static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { + .deferredToData + } + /// The default strategy for decoding `Date` columns is /// ``DatabaseDateDecodingStrategy/deferredToDate``. public static var databaseDateDecodingStrategy: DatabaseDateDecodingStrategy { @@ -561,7 +593,7 @@ extension FetchableRecord { let request = try request.makePreparedRequest(db, forSingleResult: false) if let supplementaryFetch = request.supplementaryFetch { let rows = try Row.fetchAll(request.statement, adapter: request.adapter) - try supplementaryFetch(db, rows) + try supplementaryFetch(db, rows, nil) return try rows.map(Self.init(row:)) } else { return try fetchAll(request.statement, adapter: request.adapter) } @@ -598,7 +630,7 @@ extension FetchableRecord { guard let row = try Row.fetchOne(request.statement, adapter: request.adapter) else { return nil } - try supplementaryFetch(db, [row]) + try supplementaryFetch(db, [row], nil) return try .init(row: row) } else { return try fetchOne(request.statement, adapter: request.adapter) } @@ -636,7 +668,7 @@ extension FetchableRecord where Self: Hashable { let request = try request.makePreparedRequest(db, forSingleResult: false) if let supplementaryFetch = request.supplementaryFetch { let rows = try Row.fetchAll(request.statement, adapter: request.adapter) - try supplementaryFetch(db, rows) + try supplementaryFetch(db, rows, nil) return try Set(rows.lazy.map(Self.init(row:))) } else { return try fetchSet(request.statement, adapter: request.adapter) } @@ -805,7 +837,9 @@ public final class RecordCursor: DatabaseCursor { init(statement: Statement, arguments: StatementArguments? = nil, adapter: (any RowAdapter)? = nil) throws { self._statement = statement _row = try Row(statement: statement).adapted(with: adapter, layout: statement) - try statement.reset(withArguments: arguments) + + // Assume cursor is created for immediate iteration: reset and set arguments + try statement.prepareExecution(withArguments: arguments) } deinit { @@ -820,6 +854,44 @@ public final class RecordCursor: DatabaseCursor { } } +// Explicit non-conformance to Sendable: database cursors must be used from +// a serialized database access dispatch queue. +@available(*, unavailable) +extension RecordCursor: Sendable { } + +// MARK: - DatabaseDataDecodingStrategy + +/// `DatabaseDataDecodingStrategy` specifies how `FetchableRecord` types that +/// also adopt the standard `Decodable` protocol decode their +/// `Data` properties. +/// +/// For example: +/// +/// ```swift +/// struct Player: FetchableRecord, Decodable {
+/// static let databaseDataDecodingStrategy = DatabaseDataDecodingStrategy.custom { dbValue in +/// guard let base64Data = Data.fromDatabaseValue(dbValue) else { +/// return nil +/// } +/// return Data(base64Encoded: base64Data) +/// } +/// +/// // Decoded from both database base64 strings and blobs +/// var myData: Data +/// } +/// ``` +public enum DatabaseDataDecodingStrategy { + /// Decodes `Data` columns from SQL blobs and UTF8 text. + case deferredToData + + /// Decodes `Data` columns according to the user-provided function.
+ /// + /// If the database value does not contain a suitable value, the function + /// must return nil (GRDB will interpret this nil result as a conversion + /// error, and react accordingly). + case custom((DatabaseValue) -> Data?) +} + // MARK: - DatabaseDateDecodingStrategy /// `DatabaseDateDecodingStrategy` specifies how `FetchableRecord` types that diff --git a/GRDB/Record/MutablePersistableRecord+Insert.swift b/GRDB/Record/MutablePersistableRecord+Insert.swift index 83e61018ef..4b2b1e1d04 100644 --- a/GRDB/Record/MutablePersistableRecord+Insert.swift +++ b/GRDB/Record/MutablePersistableRecord+Insert.swift @@ -91,6 +91,7 @@ extension MutablePersistableRecord { extension MutablePersistableRecord { #if GRDBCUSTOMSQLITE || GRDBCIPHER + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -122,6 +123,7 @@ extension MutablePersistableRecord { return try result.insertAndFetch(db, onConflict: conflictResolution, as: Self.self) } + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -264,6 +266,7 @@ extension MutablePersistableRecord { return success.returned } #else + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -285,7 +288,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func insertAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil) @@ -296,6 +299,7 @@ extension MutablePersistableRecord { return try result.insertAndFetch(db, onConflict: conflictResolution, as: Self.self) } + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -351,7 +355,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func insertAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -412,7 +416,7 @@ extension MutablePersistableRecord { /// error thrown by the persistence callbacks defined by the record type. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func insertAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? 
= nil, diff --git a/GRDB/Record/MutablePersistableRecord+Save.swift b/GRDB/Record/MutablePersistableRecord+Save.swift index 1de6bde593..a9776cc820 100644 --- a/GRDB/Record/MutablePersistableRecord+Save.swift +++ b/GRDB/Record/MutablePersistableRecord+Save.swift @@ -87,6 +87,7 @@ extension MutablePersistableRecord { extension MutablePersistableRecord { #if GRDBCUSTOMSQLITE || GRDBCIPHER + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. /// @@ -121,6 +122,7 @@ extension MutablePersistableRecord { return try result.saveAndFetch(db, onConflict: conflictResolution, as: Self.self) } + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. /// @@ -209,6 +211,7 @@ extension MutablePersistableRecord { return success.returned } #else + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. /// @@ -233,7 +236,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func saveAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil) @@ -244,6 +247,7 @@ extension MutablePersistableRecord { return try result.saveAndFetch(db, onConflict: conflictResolution, as: Self.self) } + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. /// @@ -261,7 +265,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func saveAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -306,7 +310,7 @@ extension MutablePersistableRecord { /// error thrown by the persistence callbacks defined by the record type. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func saveAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? 
= nil, @@ -369,15 +373,15 @@ extension MutablePersistableRecord { { // Attempt at updating if the record has a primary key if let key = try primaryKey(db) { + let databaseTableName = type(of: self).databaseTableName do { - let databaseTableName = type(of: self).databaseTableName let columns = try Set(db.columns(in: databaseTableName).map(\.name)) return try updateAndFetchWithCallbacks( db, onConflict: conflictResolution, columns: columns, selection: selection, fetch: fetch) - } catch RecordError.recordNotFound(databaseTableName: type(of: self).databaseTableName, key: key) { + } catch RecordError.recordNotFound(databaseTableName: databaseTableName, key: key) { // No row was updated: fallback on insert. } } diff --git a/GRDB/Record/MutablePersistableRecord+Update.swift b/GRDB/Record/MutablePersistableRecord+Update.swift index 36e9c53805..691d77d8d4 100644 --- a/GRDB/Record/MutablePersistableRecord+Update.swift +++ b/GRDB/Record/MutablePersistableRecord+Update.swift @@ -25,10 +25,9 @@ extension MutablePersistableRecord { /// /// ```swift /// try dbQueue.write { db in - /// if var player = Player.fetchOne(db, id: 1) { - /// player.score += 10 - /// try player.update(db, columns: ["score"]) - /// } + /// var player = Player.find(db, id: 1) + /// player.score += 10 + /// try player.update(db, columns: ["score"]) /// } /// ``` /// @@ -69,10 +68,9 @@ extension MutablePersistableRecord { /// /// ```swift /// try dbQueue.write { db in - /// if var player = Player.fetchOne(db, id: 1) { - /// player.score += 10 - /// try player.update(db, columns: [Column("score")]) - /// } + /// var player = Player.find(db, id: 1) + /// player.score += 10 + /// try player.update(db, columns: [Column("score")]) /// } /// ``` /// @@ -102,10 +100,9 @@ extension MutablePersistableRecord { /// /// ```swift /// try dbQueue.write { db in - /// if var player = Player.fetchOne(db, id: 1) { - /// player.score += 10 - /// try player.update(db) - /// } + /// var player = Player.find(db, id: 1) + /// player.score += 10 + /// try player.update(db) /// } /// ``` /// @@ -155,7 +152,7 @@ extension MutablePersistableRecord { /// nil, /// is used. /// - parameter record: The comparison record. - /// - returns: Whether the record had changes. + /// - returns: Whether the record had changes and was updated. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type, /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the @@ -180,16 +177,15 @@ extension MutablePersistableRecord { /// /// ```swift /// try dbQueue.write { db in - /// if var player = Player.fetchOne(db, id: 1) { - /// let modified = try player.updateChanges(db) { - /// $0.score = 1000 - /// $0.hasAward = true - /// } - /// if modified { - /// print("player was modified") - /// } else { - /// print("player was not modified") - /// } + /// var player = Player.find(db, id: 1) + /// let modified = try player.updateChanges(db) { + /// $0.score = 1000 + /// $0.hasAward = true + /// } + /// if modified { + /// print("player was modified") + /// } else { + /// print("player was not modified") /// } /// } /// ``` @@ -199,7 +195,7 @@ extension MutablePersistableRecord { /// nil, /// is used. /// - parameter modify: A closure that modifies the record. - /// - returns: Whether the record had changes. + /// - returns: Whether the record was changed and updated. 
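`updateChanges` pairs with ``RecordError``: when the primary key matches no row, the thrown error carries the table name and key. A sketch of handling it explicitly, assuming the usual `Player` example record and a `db` handle:

```swift
do {
    var player = try Player.find(db, id: 1)
    let modified = try player.updateChanges(db) {
        $0.score += 10
    }
    print(modified ? "player was modified" : "player was not modified")
} catch let RecordError.recordNotFound(databaseTableName: table, key: key) {
    print("no row in \(table) matches \(key)")
}
```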
/// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type, /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the @@ -222,6 +218,7 @@ extension MutablePersistableRecord { extension MutablePersistableRecord { #if GRDBCUSTOMSQLITE || GRDBCIPHER + // TODO: GRDB7 make it unable to return an optional /// Executes an `UPDATE RETURNING` statement on all columns, and returns a /// new record built from the updated row. /// @@ -245,6 +242,7 @@ extension MutablePersistableRecord { try updateAndFetch(db, onConflict: conflictResolution, as: Self.self) } + // TODO: GRDB7 make it unable to return an optional /// Executes an `UPDATE RETURNING` statement on all columns, and returns a /// new record built from the updated row. /// @@ -271,6 +269,7 @@ extension MutablePersistableRecord { } } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. The method returns a @@ -298,6 +297,7 @@ extension MutablePersistableRecord { try updateChangesAndFetch(db, onConflict: conflictResolution, as: Self.self, modify: modify) } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. The method returns a @@ -482,6 +482,7 @@ extension MutablePersistableRecord { fetch: fetch) } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. The method returns a @@ -518,6 +519,7 @@ extension MutablePersistableRecord { fetch: fetch) } #else + // TODO: GRDB7 make it unable to return an optional /// Executes an `UPDATE RETURNING` statement on all columns, and returns a /// new record built from the updated row. /// @@ -532,7 +534,7 @@ extension MutablePersistableRecord { /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the /// primary key does not match any row in the database. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil) @@ -557,7 +559,7 @@ extension MutablePersistableRecord { /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the /// primary key does not match any row in the database. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -569,6 +571,7 @@ extension MutablePersistableRecord { } } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. 
The method returns a @@ -586,7 +589,7 @@ extension MutablePersistableRecord { /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the /// primary key does not match any row in the database. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func updateChangesAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -597,6 +600,7 @@ extension MutablePersistableRecord { try updateChangesAndFetch(db, onConflict: conflictResolution, as: Self.self, modify: modify) } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. The method returns a @@ -616,7 +620,7 @@ extension MutablePersistableRecord { /// or ``RecordError/recordNotFound(databaseTableName:key:)`` if the /// primary key does not match any row in the database. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func updateChangesAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -662,7 +666,7 @@ extension MutablePersistableRecord { /// primary key does not match any row in the database. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -724,7 +728,7 @@ extension MutablePersistableRecord { /// primary key does not match any row in the database. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -768,7 +772,7 @@ extension MutablePersistableRecord { /// primary key does not match any row in the database. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func updateAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -785,6 +789,7 @@ extension MutablePersistableRecord { fetch: fetch) } + // TODO: GRDB7 make it unable to return an optional /// Modifies the record according to the provided `modify` closure, and /// executes an `UPDATE RETURNING` statement that updates the modified /// columns, if and only if the record was modified. The method returns a @@ -804,7 +809,7 @@ extension MutablePersistableRecord { /// primary key does not match any row in the database. /// - precondition: `selection` is not empty. 
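The `selection`/`fetch` variants whose availability annotations are normalized here let `UPDATE ... RETURNING` hand back selected columns instead of a full record. A sketch (SQLite 3.35+, hence the availability gate), assuming the usual `Player` record:

```swift
let newScore = try player.updateAndFetch(
    db,
    selection: [Column("score")],
    fetch: { statement in
        // The statement delivers the updated row, restricted to `selection`.
        try Int.fetchOne(statement)
    })
```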
@inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func updateChangesAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -850,7 +855,7 @@ extension MutablePersistableRecord { } #else @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ func updateChangesAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution?, diff --git a/GRDB/Record/MutablePersistableRecord+Upsert.swift b/GRDB/Record/MutablePersistableRecord+Upsert.swift index 38f4e41eb5..4374c19601 100644 --- a/GRDB/Record/MutablePersistableRecord+Upsert.swift +++ b/GRDB/Record/MutablePersistableRecord+Upsert.swift @@ -207,7 +207,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func upsert(_ db: Database) throws { try willSave(db) @@ -302,7 +302,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func upsertAndFetch( _ db: Database, onConflict conflictTarget: [String] = [], @@ -330,7 +330,7 @@ extension MutablePersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public mutating func upsertAndFetch( _ db: Database, as returnedType: T.Type, @@ -434,6 +434,15 @@ extension MutablePersistableRecord { // Rowid is the last column let rowid: Int64 = row[row.count - 1] let returned = try decode(row) + + // Now that we have fetched the values we need, we could stop + // there. But let's make sure we fully consume the cursor + // anyway, until SQLITE_DONE. This is necessary, for example, + // for upserts in tables that are synchronized with an + // FTS5 table. + // See + while try cursor.next() != nil { } + return (rowid, returned) } @@ -442,7 +451,7 @@ extension MutablePersistableRecord { // to false in its `aroundInsert` callback. 
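For completeness, a usage sketch of the upsert path touched in this hunk (SQLite 3.35+), assuming the usual `Player` record; the FTS5-related cursor consumption added above happens transparently inside the `AndFetch` variants:

```swift
try dbQueue.write { db in
    var player = Player(id: 1, name: "Arthur", score: 100)
    // INSERT INTO player ... ON CONFLICT DO UPDATE ...
    try player.upsert(db)
}
```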
var persistenceContainer = dao.persistenceContainer let rowIDColumn = dao.primaryKey.rowIDColumn - if let rowIDColumn = rowIDColumn { + if let rowIDColumn { persistenceContainer[caseInsensitive: rowIDColumn] = rowid } diff --git a/GRDB/Record/MutablePersistableRecord.swift b/GRDB/Record/MutablePersistableRecord.swift index c9932e1c67..d26cb1cbcc 100644 --- a/GRDB/Record/MutablePersistableRecord.swift +++ b/GRDB/Record/MutablePersistableRecord.swift @@ -348,7 +348,7 @@ extension MutablePersistableRecord { /// See `MutablePersistableRecord.persistenceConflictPolicy`. /// /// See -public struct PersistenceConflictPolicy { +public struct PersistenceConflictPolicy: Sendable { /// The conflict resolution algorithm for insertions public let conflictResolutionForInsert: Database.ConflictResolution diff --git a/GRDB/Record/PersistableRecord+Insert.swift b/GRDB/Record/PersistableRecord+Insert.swift index 112029d513..b44f3f4328 100644 --- a/GRDB/Record/PersistableRecord+Insert.swift +++ b/GRDB/Record/PersistableRecord+Insert.swift @@ -56,6 +56,7 @@ extension PersistableRecord { extension PersistableRecord { #if GRDBCUSTOMSQLITE || GRDBCIPHER + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -198,6 +199,7 @@ extension PersistableRecord { return success.returned } #else + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` statement, and returns a new record built /// from the inserted row. /// @@ -253,7 +255,7 @@ extension PersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func insertAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -314,7 +316,7 @@ extension PersistableRecord { /// error thrown by the persistence callbacks defined by the record type. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func insertAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, diff --git a/GRDB/Record/PersistableRecord+Save.swift b/GRDB/Record/PersistableRecord+Save.swift index 31e20e73fb..9ba55346b9 100644 --- a/GRDB/Record/PersistableRecord+Save.swift +++ b/GRDB/Record/PersistableRecord+Save.swift @@ -39,6 +39,7 @@ extension PersistableRecord { extension PersistableRecord { #if GRDBCUSTOMSQLITE || GRDBCIPHER + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. /// @@ -127,6 +128,7 @@ extension PersistableRecord { return success.returned } #else + // TODO: GRDB7 make it unable to return an optional /// Executes an `INSERT RETURNING` or `UPDATE RETURNING` statement, and /// returns a new record built from the saved row. 
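The `insertAndFetch` variants documented above pair naturally with partial records. A sketch, again with hypothetical types; note the optional return value that the new `TODO: GRDB7` comments plan to remove:

```swift
// The database generates `id` and the default `score`.
struct PartialPlayer: Codable, FetchableRecord, PersistableRecord {
    static let databaseTableName = "player"
    var name: String
}

try dbQueue.write { db in
    // INSERT INTO player (name) VALUES (?) RETURNING *
    if let player = try PartialPlayer(name: "Bob").insertAndFetch(db, as: Player.self) {
        print("Generated id:", player.id, "score:", player.score)
    }
}
```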
/// @@ -144,7 +146,7 @@ extension PersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func saveAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, @@ -189,7 +191,7 @@ extension PersistableRecord { /// error thrown by the persistence callbacks defined by the record type. /// - precondition: `selection` is not empty. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func saveAndFetch( _ db: Database, onConflict conflictResolution: Database.ConflictResolution? = nil, diff --git a/GRDB/Record/PersistableRecord+Upsert.swift b/GRDB/Record/PersistableRecord+Upsert.swift index df5df0d3cf..af1a688079 100644 --- a/GRDB/Record/PersistableRecord+Upsert.swift +++ b/GRDB/Record/PersistableRecord+Upsert.swift @@ -207,7 +207,7 @@ extension PersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func upsert(_ db: Database) throws { try willSave(db) @@ -302,7 +302,7 @@ extension PersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func upsertAndFetch( _ db: Database, onConflict conflictTarget: [String] = [], @@ -330,7 +330,7 @@ extension PersistableRecord { /// - throws: A ``DatabaseError`` whenever an SQLite error occurs, or any /// error thrown by the persistence callbacks defined by the record type. @inlinable // allow specialization so that empty callbacks are removed - @available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) // SQLite 3.35.0+ + @available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) // SQLite 3.35.0+ public func upsertAndFetch( _ db: Database, as returnedType: T.Type, diff --git a/GRDB/Record/Record.swift b/GRDB/Record/Record.swift index 86902314f2..47f37d7421 100644 --- a/GRDB/Record/Record.swift +++ b/GRDB/Record/Record.swift @@ -203,7 +203,7 @@ open class Record { // Loop until we find a change, or exhaust columns: while let (column, newValue) = newValueIterator.next() { let newDbValue = newValue?.databaseValue ?? .null - guard let oldRow = oldRow, let oldDbValue: DatabaseValue = oldRow[column] else { + guard let oldRow, let oldDbValue: DatabaseValue = oldRow[column] else { return (column, nil) } if newDbValue != oldDbValue { @@ -411,7 +411,7 @@ open class Record { /// On success, this method sets the `hasDatabaseChanges` flag to false. /// /// - parameter db: A database connection. - /// - returns: Whether the record had changes. 
+ /// - returns: Whether the record had changes and was updated. /// - throws: A ``DatabaseError`` whenever an SQLite error occurs. /// ``RecordError/recordNotFound(databaseTableName:key:)`` is thrown /// if the primary key does not match any row in the database and record diff --git a/GRDB/Record/TableRecord.swift b/GRDB/Record/TableRecord.swift index 86d0443c99..4e8d8b1b4c 100644 --- a/GRDB/Record/TableRecord.swift +++ b/GRDB/Record/TableRecord.swift @@ -80,7 +80,8 @@ import Foundation /// - ``joining(optional:)`` /// - ``joining(required:)`` /// - ``limit(_:offset:)`` -/// - ``matching(_:)`` +/// - ``matching(_:)-22m4o`` +/// - ``matching(_:)-1t8ph`` /// - ``none()`` /// - ``order(_:)-9rc11`` /// - ``order(_:)-2033k`` @@ -132,12 +133,15 @@ public protocol TableRecord { /// /// ```swift /// struct Player: TableRecord { - /// static let databaseSelection = [AllColumns()] + /// static let databaseSelection: [any SQLSelectable] = [AllColumns()] /// } /// /// struct PartialPlayer: TableRecord { /// static let databaseTableName = "player" - /// static let databaseSelection = [Column("id"), Column("name")] + /// static let databaseSelection: [any SQLSelectable] = [ + /// Column("id"), + /// Column("name"), + /// ] /// } /// /// // SELECT * FROM player @@ -146,6 +150,11 @@ public protocol TableRecord { /// // SELECT id, name FROM player /// try PartialPlayer.fetchAll(db) /// ``` + /// + /// > Important: Make sure the `databaseSelection` property is + /// > explicitly declared as `[any SQLSelectable]`. If it is not, the + /// > Swift compiler may silently miss the protocol requirement, + /// > resulting in sticky `SELECT *` requests. static var databaseSelection: [any SQLSelectable] { get } } @@ -309,7 +318,7 @@ extension TableRecord { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRecord where Self: Identifiable, ID: DatabaseValueConvertible { /// Returns whether a record exists for this primary key. /// @@ -445,7 +454,7 @@ extension TableRecord { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRecord where Self: Identifiable, ID: DatabaseValueConvertible { /// Deletes records identified by their primary keys, and returns the number /// of deleted records. @@ -660,6 +669,31 @@ extension TableRecord { // MARK: - RecordError /// A record error. +/// +/// `RecordError` is thrown by ``MutablePersistableRecord`` types when an +/// `update` method could not find any row to update: +/// +/// ```swift +/// do { +/// try player.update(db) +/// } catch let RecordError.recordNotFound(databaseTableName: table, key: key) { +/// print("Key \(key) was not found in table \(table).") +/// } +/// ``` +/// +/// `RecordError` is also thrown by ``FetchableRecord`` types when a +/// `find` method does not find any record: +/// +/// ```swift +/// do { +/// let player = try Player.find(db, id: 42) +/// } catch let RecordError.recordNotFound(databaseTableName: table, key: key) { +/// print("Key \(key) was not found in table \(table).") +/// } +/// ``` +/// +/// You can create `RecordError` instances with the +/// ``TableRecord/recordNotFound(_:id:)`` method and its variants. public enum RecordError: Error { /// A record does not exist in the database. /// @@ -700,20 +734,20 @@ extension TableRecord { /// Returns an error for a record that does not exist in the database. 
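The `Important` callout added above is easy to get wrong in practice. Spelled out with a hypothetical record:

```swift
struct PartialPlayer: TableRecord {
    static let databaseTableName = "player"

    // OK: explicitly typed, fulfills the protocol requirement.
    static let databaseSelection: [any SQLSelectable] = [Column("id"), Column("name")]

    // Not OK: Swift infers `[Column]`, the requirement silently keeps its
    // default implementation, and requests stick to `SELECT *`.
    // static let databaseSelection = [Column("id"), Column("name")]
}
```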
public static func recordNotFound(key: [String: (any DatabaseValueConvertible)?]) -> RecordError { - return RecordError.recordNotFound( + RecordError.recordNotFound( databaseTableName: databaseTableName, key: key.mapValues { $0?.databaseValue ?? .null }) } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension TableRecord where Self: Identifiable, ID: DatabaseValueConvertible { /// Returns an error for a record that does not exist in the database. /// /// - returns: ``RecordError/recordNotFound(databaseTableName:key:)``, or /// any error that prevented the `RecordError` from being constructed. public static func recordNotFound(_ db: Database, id: Self.ID) -> any Error { - return recordNotFound(db, key: id) + recordNotFound(db, key: id) } } diff --git a/GRDB/Utils/Inflections+English.swift index d9edd887d2..58338ec4f2 100644 --- a/GRDB/Utils/Inflections+English.swift +++ b/GRDB/Utils/Inflections+English.swift @@ -1,4 +1,4 @@ -// Copyright (C) 2015-2020 Gwendal Roué +// Copyright (C) 2015-2023 Gwendal Roué // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the diff --git a/GRDB/Utils/Inflections.swift index 860b4d0ba8..9c845c2c6b 100644 --- a/GRDB/Utils/Inflections.swift +++ b/GRDB/Utils/Inflections.swift @@ -31,7 +31,7 @@ extension String { /// A type that controls GRDB string inflections. /// /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) -public struct Inflections { +public struct Inflections: Sendable { private var pluralizeRules: [(NSRegularExpression, String)] = [] private var singularizeRules: [(NSRegularExpression, String)] = [] private var uncountablesRegularExpressions: [String: NSRegularExpression] = [:] diff --git a/GRDB/Utils/LockedBox.swift index 79388ae121..aef512604d 100644 --- a/GRDB/Utils/LockedBox.swift +++ b/GRDB/Utils/LockedBox.swift @@ -26,6 +26,8 @@ final class LockedBox<T> { /// $count.read { print($0) } /// /// - parameter block: A closure that accepts the value. + @inline(__always) + @usableFromInline func read<U>(_ block: (T) throws -> U) rethrows -> U { lock.lock() defer { lock.unlock() } diff --git a/GRDB/Utils/OnDemandFuture.swift index 6a5e71d3fd..6826014a47 100644 --- a/GRDB/Utils/OnDemandFuture.swift +++ b/GRDB/Utils/OnDemandFuture.swift @@ -14,7 +14,7 @@ import Foundation /// Both extra scheduling guarantees are used by GRDB in order to be /// able to spawn concurrent database reads right from the database writer /// queue, and fulfill GRDB preconditions.
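The `recordNotFound(key:)` factory above also serves custom fetching code. A hedged sketch (the `requirePlayer` helper is hypothetical):

```swift
func requirePlayer(_ db: Database, id: Int64) throws {
    guard try Player.exists(db, key: id) else {
        throw Player.recordNotFound(key: ["id": id])
    }
}
```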
-@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) struct OnDemandFuture<Output, Failure: Error>: Publisher { typealias Promise = (Result<Output, Failure>) -> Void typealias Output = Output @@ -33,7 +33,7 @@ struct OnDemandFuture<Output, Failure: Error>: Publisher { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) private class OnDemandFutureSubscription<Downstream: Subscriber>: Subscription { typealias Promise = (Result<Downstream.Input, Downstream.Failure>) -> Void diff --git a/GRDB/Utils/OrderedDictionary.swift index 3bd60eaf23..4f7d5242b8 100644 --- a/GRDB/Utils/OrderedDictionary.swift +++ b/GRDB/Utils/OrderedDictionary.swift @@ -194,6 +194,19 @@ extension OrderedDictionary: Equatable where Value: Equatable { } } +extension OrderedDictionary: CustomStringConvertible { + var description: String { + let chunks = map { (key, value) in + "\(String(reflecting: key)): \(String(reflecting: value))" + } + if chunks.isEmpty { + return "[:]" + } else { + return "[\(chunks.joined(separator: ", "))]" + } + } +} + extension Dictionary { init(_ orderedDictionary: OrderedDictionary<Key, Value>) { self = orderedDictionary.dictionary diff --git a/GRDB/Utils/Pool.swift index d6303f4e7f..6e1014c75c 100644 --- a/GRDB/Utils/Pool.swift +++ b/GRDB/Utils/Pool.swift @@ -163,7 +163,7 @@ final class Pool<T> { /// Blocks until no element is used, and runs the `barrier` function before /// any other element is dequeued. - func barrier<T>(execute barrier: () throws -> T) rethrows -> T { + func barrier<R>(execute barrier: () throws -> R) rethrows -> R { try barrierQueue.sync(flags: [.barrier]) { itemsGroup.wait() return try barrier() diff --git a/GRDB/Utils/ReceiveValuesOn.swift index 9bd6cc2b8c..abc688f78a 100644 --- a/GRDB/Utils/ReceiveValuesOn.swift +++ b/GRDB/Utils/ReceiveValuesOn.swift @@ -11,7 +11,7 @@ import Foundation /// This scheduling guarantee is used by GRDB in order to be able /// to make promises on the scheduling of database values without surprising /// the users as in . -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) struct ReceiveValuesOn<Upstream: Publisher>: Publisher { typealias Output = Upstream.Output typealias Failure = Upstream.Failure @@ -30,7 +30,7 @@ struct ReceiveValuesOn<Upstream: Publisher>: Publisher { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) private class ReceiveValuesOnSubscription<Upstream, Context, Downstream>: Subscription, Subscriber where Upstream: Publisher, @@ -211,7 +211,7 @@ where } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Publisher { /// Specifies the scheduler on which to receive values from the publisher /// diff --git a/GRDB/Utils/Utils.swift index 49ef225c54..90e3ca5573 100644 --- a/GRDB/Utils/Utils.swift +++ b/GRDB/Utils/Utils.swift @@ -31,9 +31,9 @@ func GRDBPrecondition( file: StaticString = #file, line: UInt = #line) { - /// Custom precondition function which aims at solving - /// and - /// + // Custom precondition function which aims at solving + // and + // if !condition() { fatalError(message(), file: file, line: line) } @@ -112,7 +112,7 @@ func throwingFirstError<T>(execute: () throws -> T, finally: () throws -> Void) firstError = error } } - if let firstError = firstError { + if let firstError { throw firstError } return result!
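The `CustomStringConvertible` conformance added to `OrderedDictionary` above mimics `Dictionary` literals, quoting keys and values with `String(reflecting:)`. Since the type is internal to GRDB, this free-standing snippet only re-creates the formatting logic for illustration:

```swift
let pairs: [(key: String, value: Int)] = [("a", 1), ("b", 2)]
let chunks = pairs.map { "\(String(reflecting: $0.key)): \(String(reflecting: $0.value))" }
print(chunks.isEmpty ? "[:]" : "[\(chunks.joined(separator: ", "))]")
// Prints: ["a": 1, "b": 2]
```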
@@ -190,3 +190,7 @@ extension NSLocking { sideEffect?() } } + +#if !canImport(ObjectiveC) +@inlinable func autoreleasepool<Result>(invoking body: () throws -> Result) rethrows -> Result { try body() } +#endif diff --git a/GRDB/ValueObservation/Observers/ValueConcurrentObserver.swift index b4e94fc3ac..78cc9352cb 100644 --- a/GRDB/ValueObservation/Observers/ValueConcurrentObserver.swift +++ b/GRDB/ValueObservation/Observers/ValueConcurrentObserver.swift @@ -101,6 +101,19 @@ final class ValueConcurrentObserver<Reducer: ValueReducer, Scheduler: ValueObservationScheduler> { +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) +extension ValueConcurrentObserver { /// Synchronously starts the observation, and returns the initial value. /// /// Unlike `asyncStart()`, this method does not notify the initial value or error. @@ -259,27 +285,32 @@ extension ValueConcurrentObserver { // without having to wait for an eventual long-running write // transaction to complete. // - // Fetch value & tracked region in a synchronous way. - // - // We perform the initial read from a DatabaseSnapshot, because - // it is a handy way to keep a read transaction open until we grab a - // write access, and compare the database versions. - let initialSnapshot = try databaseAccess.dbPool.makeSnapshot() - let (fetchedValue, initialRegion, initialWALSnapshot) = try initialSnapshot.read { - db -> (Reducer.Fetched, DatabaseRegion, WALSnapshot?) in - // swiftlint:disable:previous closure_parameter_position - + // We perform the initial read from a long-lived WAL snapshot + // transaction, because it is a handy way to keep a read transaction + // open until we grab a write access, and compare the database versions. + let initialFetchTransaction: WALSnapshotTransaction + do { + initialFetchTransaction = try databaseAccess.dbPool.walSnapshotTransaction() + } catch DatabaseError.SQLITE_ERROR { + // We can't create a WAL snapshot. The WAL file is probably + // missing, or is truncated. Let's degrade the observation + // by not using any snapshot. + // For more information, see + return try syncStartWithoutWALSnapshot(from: databaseAccess) + } + + let (fetchedValue, initialRegion): (Reducer.Fetched, DatabaseRegion) = try initialFetchTransaction.read { db in switch trackingMode { case let .constantRegion(regions): let fetchedValue = try databaseAccess.fetch(db) let region = try DatabaseRegion.union(regions)(db) let initialRegion = try region.observableRegion(db) - return (fetchedValue, initialRegion, try? WALSnapshot(db)) + return (fetchedValue, initialRegion) case .constantRegionRecordedFromSelection, .nonConstantRegionRecordedFromSelection: let (fetchedValue, initialRegion) = try databaseAccess.fetchRecordingObservedRegion(db) - return (fetchedValue, initialRegion, try? WALSnapshot(db)) + return (fetchedValue, initialRegion) } } @@ -294,8 +325,7 @@ extension ValueConcurrentObserver { // Start observation asyncStartObservation( from: databaseAccess, - initialSnapshot: initialSnapshot, - initialWALSnapshot: initialWALSnapshot, + initialFetchTransaction: initialFetchTransaction, initialRegion: initialRegion) return initialValue @@ -310,76 +340,102 @@ extension ValueConcurrentObserver { // without having to wait for an eventual long-running write // transaction to complete. // - // We perform the initial read from a DatabaseSnapshot, because - // it is a handy way to keep a read transaction open until we grab a - // write access, and compare the database versions.
- do { - let initialSnapshot = try databaseAccess.dbPool.makeSnapshot() - initialSnapshot.asyncRead { dbResult in - let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } - guard isNotifying else { return /* Cancelled */ } - - do { - // Fetch - let fetchedValue: Reducer.Fetched - let initialRegion: DatabaseRegion - let db = try dbResult.get() - switch self.trackingMode { - case let .constantRegion(regions): - fetchedValue = try databaseAccess.fetch(db) - let region = try DatabaseRegion.union(regions)(db) - initialRegion = try region.observableRegion(db) + // We perform the initial read from a long-lived WAL snapshot + // transaction, because it is a handy way to keep a read transaction + // open until we grab a write access, and compare the database versions. + databaseAccess.dbPool.asyncWALSnapshotTransaction { result in + let (isNotifying, databaseAccess) = self.lock.synchronized { + (self.notificationCallbacks != nil, self.databaseAccess) + } + guard isNotifying, let databaseAccess else { return /* Cancelled */ } + + do { + let initialFetchTransaction = try result.get() + // Second async jump because that's how + // `DatabasePool.asyncWALSnapshotTransaction` has to be used. + initialFetchTransaction.asyncRead { db in + do { + let fetchedValue: Reducer.Fetched + let initialRegion: DatabaseRegion - case .constantRegionRecordedFromSelection, - .nonConstantRegionRecordedFromSelection: - (fetchedValue, initialRegion) = try databaseAccess.fetchRecordingObservedRegion(db) - } - - // Reduce - // - // Reducing is performed asynchronously, so that we do not lock - // a database dispatch queue longer than necessary. - self.reduceQueue.async { - let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } - guard isNotifying else { return /* Cancelled */ } + switch self.trackingMode { + case let .constantRegion(regions): + fetchedValue = try databaseAccess.fetch(db) + let region = try DatabaseRegion.union(regions)(db) + initialRegion = try region.observableRegion(db) + + case .constantRegionRecordedFromSelection, + .nonConstantRegionRecordedFromSelection: + (fetchedValue, initialRegion) = try databaseAccess.fetchRecordingObservedRegion(db) + } - do { - guard let initialValue = try self.reducer._value(fetchedValue) else { - fatalError("Broken contract: reducer has no initial value") - } + // Reduce + // + // Reducing is performed asynchronously, so that we do not lock + // a database dispatch queue longer than necessary. 
+ self.reduceQueue.async { + let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } + guard isNotifying else { return /* Cancelled */ } - // Notify - self.scheduler.schedule { - let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } - guard let onChange else { return /* Cancelled */ } - onChange(initialValue) + do { + guard let initialValue = try self.reducer._value(fetchedValue) else { + fatalError("Broken contract: reducer has no initial value") + } + + // Notify + self.scheduler.schedule { + let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } + guard let onChange else { return /* Cancelled */ } + onChange(initialValue) + } + } catch { + self.notifyError(error) } - } catch { - self.notifyError(error) } + + // Start observation + self.asyncStartObservation( + from: databaseAccess, + initialFetchTransaction: initialFetchTransaction, + initialRegion: initialRegion) + } catch { + self.notifyError(error) } - - // Start observation - self.asyncStartObservation( - from: databaseAccess, - initialSnapshot: initialSnapshot, - initialWALSnapshot: try? WALSnapshot(db), - initialRegion: initialRegion) - } catch { - self.notifyError(error) } + } catch DatabaseError.SQLITE_ERROR { + // We can't create a WAL snapshot. The WAL file is probably + // missing, or is truncated. Let's degrade the observation + // by not using any snapshot. + // For more information, see + self.asyncStartWithoutWALSnapshot(from: databaseAccess) + } catch { + self.notifyError(error) } - } catch { - self.notifyError(error) } } private func asyncStartObservation( from databaseAccess: DatabaseAccess, - initialSnapshot: DatabaseSnapshot, - initialWALSnapshot: WALSnapshot?, + initialFetchTransaction: WALSnapshotTransaction, initialRegion: DatabaseRegion) { + // We'll start the observation when we can access the writer + // connection. Until then, maybe the database has been modified + // since the initial fetch: we'll then need to notify a fresh value. + // + // To know if the database has been modified between the initial + // fetch and the writer access, we'll compare WAL snapshots. + // + // WAL snapshots can only be compared if the database is not + // checkpointed. That's why we'll keep `initialFetchTransaction` + // alive until the comparison is done. + // + // However, we want to release `initialFetchTransaction` as soon as + // possible, so that the reader connection it holds becomes + // available for other reads. It will be released when this optional + // is set to nil: + var initialFetchTransaction: WALSnapshotTransaction? = initialFetchTransaction + databaseAccess.dbPool.asyncWriteWithoutTransaction { writerDB in let events = self.lock.synchronized { self.notificationCallbacks?.events } guard let events else { return /* Cancelled */ } @@ -387,23 +443,22 @@ extension ValueConcurrentObserver { do { var observedRegion = initialRegion - // Transaction is needed for comparing version snapshots try writerDB.isolated(readOnly: true) { - // Keep initialSnapshot alive until we have compared - // database versions. It prevents database checkpointing, - // and keeps WAL snapshots (`sqlite3_snapshot`) valid - // and comparable. - let isModified = withExtendedLifetime(initialSnapshot) { - guard let initialWALSnapshot, - let currentWALSnapshot = try? WALSnapshot(writerDB) - else { - return true - } - let ordering = initialWALSnapshot.compare(currentWALSnapshot) + // Was the database modified since the initial fetch? 
+ let isModified: Bool + if let currentWALSnapshot = try? WALSnapshot(writerDB) { + let ordering = initialFetchTransaction!.walSnapshot.compare(currentWALSnapshot) assert(ordering <= 0, "Unexpected snapshot ordering") - return ordering < 0 + isModified = ordering < 0 + } else { + // Can't compare: assume the database was modified. + isModified = true } + // Comparison done: end the WAL snapshot transaction + // and release its reader connection. + initialFetchTransaction = nil + if isModified { events.databaseDidChange?() @@ -438,7 +493,7 @@ extension ValueConcurrentObserver { let value = try self.reducer._value(fetchedValue) // Notify - if let value = value { + if let value { self.scheduler.schedule { let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } guard let onChange else { return /* Cancelled */ } @@ -463,11 +518,184 @@ extension ValueConcurrentObserver { } } } +} +#else +extension ValueConcurrentObserver { + private func syncStart(from databaseAccess: DatabaseAccess) throws -> Reducer.Value { + try syncStartWithoutWALSnapshot(from: databaseAccess) + } - private func startObservation(_ writerDB: Database, observedRegion: DatabaseRegion) { - observationState.region = observedRegion - assert(observationState.isModified == false) - writerDB.add(transactionObserver: self, extent: .observerLifetime) + private func asyncStart(from databaseAccess: DatabaseAccess) { + asyncStartWithoutWALSnapshot(from: databaseAccess) + } +} +#endif + +extension ValueConcurrentObserver { + /// Synchronously starts the observation, and returns the initial value. + /// + /// Unlike `asyncStartWithoutWALSnapshot()`, this method does not notify the initial value or error. + private func syncStartWithoutWALSnapshot(from databaseAccess: DatabaseAccess) throws -> Reducer.Value { + // Start from a read access. The whole point of using a DatabasePool + // for observing the database is to be able to fetch the initial value + // without having to wait for an eventual long-running write + // transaction to complete. + let (fetchedValue, initialRegion) = try databaseAccess.dbPool.read { db -> (Reducer.Fetched, DatabaseRegion) in + switch trackingMode { + case let .constantRegion(regions): + let fetchedValue = try databaseAccess.fetch(db) + let region = try DatabaseRegion.union(regions)(db) + let initialRegion = try region.observableRegion(db) + return (fetchedValue, initialRegion) + + case .constantRegionRecordedFromSelection, + .nonConstantRegionRecordedFromSelection: + let (fetchedValue, initialRegion) = try databaseAccess.fetchRecordingObservedRegion(db) + return (fetchedValue, initialRegion) + } + } + + // Reduce + let initialValue = try reduceQueue.sync { + guard let initialValue = try reducer._value(fetchedValue) else { + fatalError("Broken contract: reducer has no initial value") + } + return initialValue + } + + // Start observation + asyncStartObservationWithoutWALSnapshot( + from: databaseAccess, + initialRegion: initialRegion) + + return initialValue + } + + /// Asynchronously starts the observation + /// + /// Unlike `syncStartWithoutWALSnapshot()`, this method does notify the initial value or error. + private func asyncStartWithoutWALSnapshot(from databaseAccess: DatabaseAccess) { + // Start from a read access. The whole point of using a DatabasePool + // for observing the database is to be able to fetch the initial value + // without having to wait for an eventual long-running write + // transaction to complete. 
+ databaseAccess.dbPool.asyncRead { dbResult in + let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } + guard isNotifying else { return /* Cancelled */ } + + do { + // Fetch + let fetchedValue: Reducer.Fetched + let initialRegion: DatabaseRegion + let db = try dbResult.get() + switch self.trackingMode { + case let .constantRegion(regions): + fetchedValue = try databaseAccess.fetch(db) + let region = try DatabaseRegion.union(regions)(db) + initialRegion = try region.observableRegion(db) + + case .constantRegionRecordedFromSelection, + .nonConstantRegionRecordedFromSelection: + (fetchedValue, initialRegion) = try databaseAccess.fetchRecordingObservedRegion(db) + } + + // Reduce + // + // Reducing is performed asynchronously, so that we do not lock + // a database dispatch queue longer than necessary. + self.reduceQueue.async { + let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } + guard isNotifying else { return /* Cancelled */ } + + do { + guard let initialValue = try self.reducer._value(fetchedValue) else { + fatalError("Broken contract: reducer has no initial value") + } + + // Notify + self.scheduler.schedule { + let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } + guard let onChange else { return /* Cancelled */ } + onChange(initialValue) + } + } catch { + self.notifyError(error) + } + } + + // Start observation + self.asyncStartObservationWithoutWALSnapshot( + from: databaseAccess, + initialRegion: initialRegion) + } catch { + self.notifyError(error) + } + } + } + + private func asyncStartObservationWithoutWALSnapshot( + from databaseAccess: DatabaseAccess, + initialRegion: DatabaseRegion) + { + databaseAccess.dbPool.asyncWriteWithoutTransaction { writerDB in + let events = self.lock.synchronized { self.notificationCallbacks?.events } + guard let events else { return /* Cancelled */ } + events.databaseDidChange?() + + do { + try writerDB.isolated(readOnly: true) { + // Fetch + let fetchedValue: Reducer.Fetched + let observedRegion: DatabaseRegion + switch self.trackingMode { + case .constantRegion: + fetchedValue = try databaseAccess.fetch(writerDB) + observedRegion = initialRegion + events.willTrackRegion?(initialRegion) + self.startObservation(writerDB, observedRegion: initialRegion) + + case .constantRegionRecordedFromSelection, + .nonConstantRegionRecordedFromSelection: + (fetchedValue, observedRegion) = try databaseAccess.fetchRecordingObservedRegion(writerDB) + events.willTrackRegion?(observedRegion) + self.startObservation(writerDB, observedRegion: observedRegion) + } + + // Reduce + // + // Reducing is performed asynchronously, so that we do not lock + // the writer dispatch queue longer than necessary. + // + // Important: reduceQueue.async guarantees the same ordering + // between transactions and notifications! 
+ self.reduceQueue.async { + let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } + guard isNotifying else { return /* Cancelled */ } + + do { + let value = try self.reducer._value(fetchedValue) + + // Notify + if let value { + self.scheduler.schedule { + let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } + guard let onChange else { return /* Cancelled */ } + onChange(value) + } + } + } catch { + let dbPool = self.lock.synchronized { self.databaseAccess?.dbPool } + dbPool?.asyncWriteWithoutTransaction { writerDB in + self.stopDatabaseObservation(writerDB) + } + self.notifyError(error) + } + } + } + } catch { + self.notifyError(error) + } + } } } @@ -482,6 +710,13 @@ extension ValueConcurrentObserver: TransactionObserver { } } + func databaseDidChange() { + // Database was modified! + observationState.isModified = true + // We can stop observing the current transaction + stopObservingDatabaseChangesUntilNextTransaction() + } + func databaseDidChange(with event: DatabaseEvent) { if let region = observationState.region, region.isModified(by: event) { // Database was modified! @@ -508,13 +743,10 @@ extension ValueConcurrentObserver: TransactionObserver { events.databaseDidChange?() // Fetch - let future: DatabaseFuture - switch trackingMode { case .constantRegion, .constantRegionRecordedFromSelection: - future = databaseAccess.dbPool.concurrentRead { db in - try databaseAccess.fetch(db) - } + setNeedsFetching(databaseAccess: databaseAccess) + case .nonConstantRegionRecordedFromSelection: // When the tracked region is not constant, we can't perform // concurrent fetches of observed values. @@ -539,26 +771,63 @@ extension ValueConcurrentObserver: TransactionObserver { } observationState.region = observedRegion - future = DatabaseFuture(.success(fetchedValue)) + reduce(.success(fetchedValue)) } catch { stopDatabaseObservation(writerDB) notifyError(error) return } } - - // Reduce - // - // Reducing is performed asynchronously, so that we do not lock - // the writer dispatch queue longer than necessary. - // - // Important: reduceQueue.async guarantees the same ordering between - // transactions and notifications! + } + + private func setNeedsFetching(databaseAccess: DatabaseAccess) { + $fetchingState.update { state in + switch state { + case .idle: + state = .fetching + asyncFetch(databaseAccess: databaseAccess) + + case .fetching: + state = .fetchingAndNeedsFetch + + case .fetchingAndNeedsFetch: + break + } + } + } + + private func asyncFetch(databaseAccess: DatabaseAccess) { + databaseAccess.dbPool.asyncRead { [self] dbResult in + let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } + guard isNotifying else { return /* Cancelled */ } + + let fetchResult = dbResult.flatMap { db in + Result { try databaseAccess.fetch(db) } + } + + self.reduce(fetchResult) + + $fetchingState.update { state in + switch state { + case .idle: + // GRDB bug + preconditionFailure() + + case .fetching: + state = .idle + + case .fetchingAndNeedsFetch: + state = .fetching + asyncFetch(databaseAccess: databaseAccess) + } + } + } + } + + private func reduce(_ fetchResult: Result) { reduceQueue.async { do { - // Wait until fetch has completed - // TODO: find a way to guarantee correct ordering without waiting for a semaphore and blocking a thread. 
- let fetchedValue = try future.wait() + let fetchedValue = try fetchResult.get() let isNotifying = self.lock.synchronized { self.notificationCallbacks != nil } guard isNotifying else { return /* Cancelled */ } @@ -566,7 +835,7 @@ extension ValueConcurrentObserver: TransactionObserver { let value = try self.reducer._value(fetchedValue) // Notify value - if let value = value { + if let value { self.scheduler.schedule { let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } guard let onChange else { return /* Cancelled */ } diff --git a/GRDB/ValueObservation/Observers/ValueWriteOnlyObserver.swift b/GRDB/ValueObservation/Observers/ValueWriteOnlyObserver.swift index 404e73de56..ea084cce88 100644 --- a/GRDB/ValueObservation/Observers/ValueWriteOnlyObserver.swift +++ b/GRDB/ValueObservation/Observers/ValueWriteOnlyObserver.swift @@ -18,7 +18,11 @@ import Foundation /// reducing stage. /// /// **Notify** is calling user callbacks, in case of database change or error. -final class ValueWriteOnlyObserver { +final class ValueWriteOnlyObserver< + Writer: DatabaseWriter, + Reducer: ValueReducer, + Scheduler: ValueObservationScheduler> +{ // MARK: - Configuration // // Configuration is not mutable. @@ -341,6 +345,13 @@ extension ValueWriteOnlyObserver: TransactionObserver { } } + func databaseDidChange() { + // Database was modified! + observationState.isModified = true + // We can stop observing the current transaction + stopObservingDatabaseChangesUntilNextTransaction() + } + func databaseDidChange(with event: DatabaseEvent) { if let region = observationState.region, region.isModified(by: event) { // Database was modified! @@ -403,7 +414,7 @@ extension ValueWriteOnlyObserver: TransactionObserver { let value = try self.reducer._value(fetchedValue) // Notify value - if let value = value { + if let value { self.scheduler.schedule { let onChange = self.lock.synchronized { self.notificationCallbacks?.onChange } guard let onChange else { return /* Cancelled */ } diff --git a/GRDB/ValueObservation/Reducers/RemoveDuplicates.swift b/GRDB/ValueObservation/Reducers/RemoveDuplicates.swift index df1eb0ab26..6d2e64e537 100644 --- a/GRDB/ValueObservation/Reducers/RemoveDuplicates.swift +++ b/GRDB/ValueObservation/Reducers/RemoveDuplicates.swift @@ -81,7 +81,7 @@ extension ValueReducers { guard let value = try base._value(fetched) else { return nil } - if let previousValue = previousValue, predicate(previousValue, value) { + if let previousValue, predicate(previousValue, value) { // Don't notify consecutive identical values return nil } diff --git a/GRDB/ValueObservation/SharedValueObservation.swift b/GRDB/ValueObservation/SharedValueObservation.swift index 946a6c90f3..0a0e742ae3 100644 --- a/GRDB/ValueObservation/SharedValueObservation.swift +++ b/GRDB/ValueObservation/SharedValueObservation.swift @@ -1,7 +1,7 @@ import Foundation /// The extent of the shared subscription to a ``SharedValueObservation``. -public enum SharedValueObservationExtent { +public enum SharedValueObservationExtent: Sendable { /// The ``SharedValueObservation`` starts a single database observation, /// which stops when the `SharedValueObservation` is deallocated and all /// subscriptions terminated. 
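`SharedValueObservationExtent` (now `Sendable`) controls how long the single underlying observation lives. A sketch of sharing, reusing the hypothetical `Player` and `dbQueue` from earlier sketches:

```swift
let shared = ValueObservation
    .tracking { db in try Player.fetchAll(db) }
    .shared(in: dbQueue, extent: .whileObserved)

// Both subscribers are fed by one single database observation.
// Keep the cancellables alive for as long as the subscriptions should last.
let subscriptionA = shared.start(
    onError: { print("A error:", $0) },
    onChange: { players in print("A sees \(players.count) players") })
let subscriptionB = shared.start(
    onError: { print("B error:", $0) },
    onChange: { players in print("B sees \(players.count) players") })
```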
@@ -291,7 +291,7 @@ public final class SharedValueObservation { /// print("fresh players: \(players)") /// } /// ``` - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func publisher() -> DatabasePublishers.Value { DatabasePublishers.Value { onError, onChange in self.start(onError: onError, onChange: onChange) @@ -368,7 +368,7 @@ extension SharedValueObservation { /// print("Fresh players: \(players)") /// } /// ``` - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func values(bufferingPolicy: AsyncValueObservation.BufferingPolicy = .unbounded) -> AsyncValueObservation { diff --git a/GRDB/ValueObservation/ValueObservation.swift b/GRDB/ValueObservation/ValueObservation.swift index d3f1bb2cb6..37c313b20a 100644 --- a/GRDB/ValueObservation/ValueObservation.swift +++ b/GRDB/ValueObservation/ValueObservation.swift @@ -4,344 +4,6 @@ import Combine import Dispatch import Foundation -/// `ValueObservation` tracks changes in the results of database requests, and -/// notifies fresh values whenever the database changes. -/// -/// ## Overview -/// -/// Tracked changes are insertions, updates, and deletions that impact the -/// tracked value, whether performed with raw SQL, or . -/// This includes indirect changes triggered by -/// [foreign keys actions](https://www.sqlite.org/foreignkeys.html#fk_actions) -/// or [SQL triggers](https://www.sqlite.org/lang_createtrigger.html). -/// -/// Changes to internal system tables (such as `sqlite_master`) and changes to -/// [`WITHOUT ROWID`](https://www.sqlite.org/withoutrowid.html) tables are -/// not notified. -/// -/// ## ValueObservation Usage -/// -/// 1. Make sure that a unique database connection, ``DatabaseQueue`` or -/// ``DatabasePool``, is kept open during the whole duration of -/// the observation. -/// -/// `ValueObservation` does not notify changes performed by -/// external connections. -/// See -/// for more information. -/// -/// 2. Create a `ValueObservation` with a closure that fetches the -/// observed value: -/// -/// ```swift -/// let observation = ValueObservation.tracking { db in -/// // Fetch and return the observed value -/// } -/// -/// // For example, an observation of [Player], which tracks all players: -/// let observation = ValueObservation.tracking { db in -/// try Player.fetchAll(db) -/// } -/// -/// // The same observation, using shorthand notation: -/// let observation = ValueObservation.tracking(Player.fetchAll) -/// ``` -/// -/// There is no limit on the values that can be observed. An observation -/// can perform multiple requests, from multiple database tables, and use -/// raw SQL. See ``tracking(_:)`` for some examples. -/// -/// 3. Start the observation in order to be notified of changes: -/// -/// ```swift -/// let cancellable = observation.start(in: dbQueue) { error in -/// // Handle error -/// } onChange: { (players: [Player]) in -/// print("Fresh players", players) -/// } -/// ``` -/// -/// 4. Stop the observation by calling the ``DatabaseCancellable/cancel()`` -/// method on the object returned by the `start` method. 
Cancellation is -/// automatic when the cancellable is deallocated: -/// -/// ```swift -/// cancellable.cancel() -/// ``` -/// -/// `ValueObservation` can also be turned into an async sequence, a Combine -/// publisher, or an RxSwift observable (see the companion library -/// [RxGRDB](https://github.com/RxSwiftCommunity/RxGRDB)): -/// -/// - Async sequence: -/// -/// ```swift -/// do { -/// for try await players in observation.values(in: dbQueue) { -/// print("Fresh players", players) -/// } -/// } catch { -/// // Handle error -/// } -/// ``` -/// -/// - Combine Publisher: -/// -/// ```swift -/// let cancellable = observation.publisher(in: dbQueue).sink { completion in -/// // Handle completion -/// } receiveValue: { (players: [Player]) in -/// print("Fresh players", players) -/// } -/// ``` -/// -/// ## ValueObservation Behavior -/// -/// `ValueObservation` notifies an initial value before the eventual changes. -/// -/// `ValueObservation` only notifies changes committed to disk. -/// -/// By default, `ValueObservation` notifies a fresh value whenever any component -/// of its fetched value is modified (any fetched column, row, etc.). This can -/// be configured: -/// see . -/// -/// By default, `ValueObservation` notifies the initial value, as well as -/// eventual changes and errors, on the main dispatch queue, asynchronously. -/// This can be configured: -/// see . -/// -/// `ValueObservation` may coalesce subsequent changes into a -/// single notification. -/// -/// `ValueObservation` may notify consecutive identical values. You can filter -/// out the undesired duplicates with the ``removeDuplicates()`` method. -/// -/// Starting an observation retains the database connection, until it is -/// stopped. As long as the observation is active, the database connection -/// won't be deallocated. -/// -/// The database observation stops when the cancellable returned by the `start` -/// method is cancelled or deallocated, or if an error occurs. -/// -/// Take care that there are use cases that `ValueObservation` is unfit for. -/// For example, an application may need to process absolutely all changes, -/// and avoid any coalescing. An application may also need to process changes -/// before any further modifications could be performed in the database file. In -/// those cases, the application needs to track *individual transactions*, not -/// values: use ``DatabaseRegionObservation``. If you need to process -/// changes before they are committed to disk, use ``TransactionObserver``. -/// -/// ## ValueObservation Scheduling -/// -/// By default, `ValueObservation` notifies the initial value, as well as -/// eventual changes and errors, on the main dispatch queue, asynchronously: -/// -/// ```swift -/// // The default scheduling -/// let cancellable = observation.start(in: dbQueue) { error in -/// // Called asynchronously on the main dispatch queue -/// } onChange: { value in -/// // Called asynchronously on the main dispatch queue -/// print("Fresh value", value) -/// } -/// ``` -/// -/// You can change this behavior by adding a `scheduling` argument to the -/// `start()` method. -/// -/// For example, the ``ValueObservationScheduler/immediate`` scheduler -/// notifies all values on the main dispatch queue, and notifies the first -/// one immediately when the observation starts. -/// -/// It is very useful in graphic applications, because you can configure views -/// right away, without waiting for the initial value to be fetched eventually. 
-/// You don't have to implement any empty or loading screen, or to prevent some -/// undesired initial animation. -/// -/// The `immediate` scheduling requires that the observation starts from the -/// main dispatch queue (a fatal error is raised otherwise): -/// -/// ```swift -/// let cancellable = observation.start(in: dbQueue, scheduling: .immediate) { error in -/// // Called on the main dispatch queue -/// } onChange: { value in -/// // Called on the main dispatch queue -/// print("Fresh value", value) -/// } -/// // <- Here "Fresh value" has already been printed. -/// ``` -/// -/// The other built-in scheduler ``ValueObservationScheduler/async(onQueue:)`` -/// asynchronously schedules values and errors on the dispatch queue of -/// your choice. -/// -/// ## ValueObservation Sharing -/// -/// Sharing a `ValueObservation` spares database resources. When a database -/// change happens, a fresh value is fetched only once, and then notified to -/// all clients of the shared observation. -/// -/// You build a shared observation with ``shared(in:scheduling:extent:)``: -/// -/// ```swift -/// // SharedValueObservation<[Player]> -/// let sharedObservation = ValueObservation -/// .tracking { db in try Player.fetchAll(db) } -/// .shared(in: dbQueue) -/// ``` -/// -/// `ValueObservation` and `SharedValueObservation` are nearly identical, but -/// the latter has no operator such as `map`. As a replacement, you may -/// for example use Combine apis: -/// -/// ```swift -/// let cancellable = try sharedObservation -/// .publisher() // Turn shared observation into a Combine Publisher -/// .map { ... } // The map operator from Combine -/// .sink(...) -/// ``` -/// -/// -/// ## Specifying the Tracked Region -/// -/// While the standard ``tracking(_:)`` method lets you track changes to a -/// fetched value and receive any changes to it, sometimes your use case might -/// require more granular control. -/// -/// Consider a scenario where you'd like to get a specific Player's row, but -/// only when their `score` column changes. You can use -/// ``tracking(region:fetch:)`` to do just that: -/// -/// ```swift -/// let observation = ValueObservation.tracking( -/// // Define what database region constitutes a "change" -/// region: Player.select(Column("score")).filter(id: 1), -/// // Define what to fetch upon such change -/// fetch: { db in try Player.fetchOne(db, id: 1) } -/// ) -/// ``` -/// -/// This overload of `ValueObservation` lets you entirely separate the -/// **observed region** from the **fetched value** itself, providing utmost -/// flexibility. See ``DatabaseRegionConvertible`` for more information about -/// the regions that can be tracked. -/// -/// ## ValueObservation Performance -/// -/// This section further describes runtime aspects of `ValueObservation`, and -/// provides some optimization tips for demanding applications. -/// -/// **`ValueObservation` is triggered by database transactions that may modify -/// the tracked value.** -/// -/// Precisely speaking, `ValueObservation` tracks changes in a -/// ``DatabaseRegion``, not changes in values. -/// -/// For example, if you track the maximum score of players, all transactions -/// that impact the `score` column of the `player` database table (any update, -/// insertion, or deletion) trigger the observation, even if the maximum score -/// itself is not changed. -/// -/// You can filter out undesired duplicate notifications with the -/// ``removeDuplicates()`` method. 
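As a compact, self-contained reminder of the pattern the paragraphs above describe (constant tracked region plus duplicate filtering), assuming the hypothetical `player` table from earlier sketches:

```swift
// Notify only when the maximum score actually changes, not on every
// transaction that merely touches the observed region.
let observation = ValueObservation
    .trackingConstantRegion { db in
        try Int.fetchOne(db, sql: "SELECT MAX(score) FROM player")
    }
    .removeDuplicates()

let cancellable = observation.start(in: dbQueue) { error in
    print("error:", error)
} onChange: { maxScore in
    print("Max score:", maxScore ?? 0)
}
```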
-/// -/// **ValueObservation can create database contention.** In other words, active -/// observations take a toll on the constrained database resources. When -/// triggered by impactful transactions, observations fetch fresh values, and -/// can delay read and write database accesses of other application components. -/// -/// When needed, you can help GRDB optimize observations and reduce -/// database contention: -/// -/// > Tip: Stop observations when possible. -/// > -/// > For example, if a `UIViewController` needs to display database values, it -/// > can start the observation in `viewWillAppear`, and stop it in -/// > `viewWillDisappear`. -/// > -/// > In a SwiftUI application, you can profit from the -/// > [GRDBQuery](https://github.com/groue/GRDBQuery) companion library, and its -/// > [`View.queryObservation(_:)`](https://swiftpackageindex.com/groue/grdbquery/documentation/grdbquery/queryobservation) -/// > method. -/// -/// > Tip: Share observations when possible. -/// > -/// > Each call to `ValueObservation.start` method triggers independent values -/// > refreshes. When several components of your app are interested in the same -/// > value, consider sharing the observation -/// > with ``shared(in:scheduling:extent:)``. -/// -/// > Tip: Use a ``DatabasePool``, because it can perform multi-threaded -/// > database accesses. -/// -/// > Tip: When the observation processes some raw fetched values, use the -/// > ``map(_:)`` operator: -/// > -/// > ```swift -/// > // Plain observation -/// > let observation = ValueObservation.tracking { db -> MyValue in -/// > let players = try Player.fetchAll(db) -/// > return computeMyValue(players) -/// > } -/// > -/// > // Optimized observation -/// > let observation = ValueObservation -/// > .tracking { db try Player.fetchAll(db) } -/// > .map { players in computeMyValue(players) } -/// > ``` -/// > -/// > The `map` operator helps reducing database contention because it performs -/// > its job without blocking database accesses. -/// -/// > Tip: When the observation tracks a constant database region, create an -/// > optimized observation with the ``trackingConstantRegion(_:)`` method. See -/// > the documentation of this method for more information about what -/// > constitutes a "constant region", and the nature of the optimization. -/// -/// ## Topics -/// -/// ### Creating a ValueObservation -/// -/// - ``tracking(_:)`` -/// - ``trackingConstantRegion(_:)`` -/// - ``tracking(region:fetch:)`` -/// - ``tracking(regions:fetch:)`` -/// -/// ### Creating a Shared Observation -/// -/// - ``shared(in:scheduling:extent:)`` -/// - ``SharedValueObservationExtent`` -/// -/// ### Accessing Observed Values -/// -/// - ``publisher(in:scheduling:)`` -/// - ``start(in:scheduling:onError:onChange:)`` -/// - ``values(in:scheduling:bufferingPolicy:)`` -/// - ``DatabaseCancellable`` -/// - ``ValueObservationScheduler`` -/// -/// ### Mapping Values -/// -/// - ``map(_:)`` -/// -/// ### Filtering Values -/// -/// - ``removeDuplicates()`` -/// - ``removeDuplicates(by:)`` -/// -/// ### Requiring Write Access -/// -/// - ``requiresWriteAccess`` -/// -/// ### Debugging -/// -/// - ``handleEvents(willStart:willFetch:willTrackRegion:databaseDidChange:didReceiveValue:didFail:didCancel:)`` -/// - ``print(_:to:)`` -/// -/// ### Support -/// -/// - ``ValueReducer`` public struct ValueObservation { var events = ValueObservationEvents() @@ -634,7 +296,7 @@ extension ValueObservation { /// - parameter reader: A DatabaseReader. 
/// - parameter scheduler: A ValueObservationScheduler. By default, fresh /// values are dispatched asynchronously on the main dispatch queue. - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func values( in reader: some DatabaseReader, scheduling scheduler: some ValueObservationScheduler = .async(onQueue: .main), @@ -648,6 +310,7 @@ extension ValueObservation { } } +// TODO: [GRDB7] Make it Sendable for easier integration with AsyncAlgorithms /// An asynchronous sequence of values observed by a ``ValueObservation``. /// /// - note: [**🔥 EXPERIMENTAL**](https://github.com/groue/GRDB.swift/blob/master/README.md#what-are-experimental-features) @@ -669,7 +332,7 @@ extension ValueObservation { /// /// You build an `AsyncValueObservation` from ``ValueObservation`` or /// ``SharedValueObservation``. -@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public struct AsyncValueObservation: AsyncSequence { public typealias BufferingPolicy = AsyncThrowingStream.Continuation.BufferingPolicy public typealias AsyncIterator = Iterator @@ -704,7 +367,7 @@ public struct AsyncValueObservation: AsyncSequence { } let iterator = stream.makeAsyncIterator() - if let cancellable = cancellable { + if let cancellable { return Iterator( iterator: iterator, cancellable: cancellable) @@ -771,7 +434,7 @@ extension ValueObservation { /// - parameter scheduler: A ValueObservationScheduler. By default, fresh /// values are dispatched asynchronously on the main dispatch queue. /// - returns: A Combine publisher - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func publisher( in reader: some DatabaseReader, scheduling scheduler: some ValueObservationScheduler = .async(onQueue: .main)) @@ -788,7 +451,7 @@ extension ValueObservation { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension DatabasePublishers { /// A publisher that publishes the values of a ``ValueObservation``. /// @@ -932,12 +595,18 @@ extension ValueObservation { /// Creates an optimized `ValueObservation` that notifies the fetched value /// whenever it changes. /// - /// The optimization reduces database contention by not blocking database - /// writes when the fresh value is fetched. + /// Unlike observations created with ``tracking(_:)``, the returned + /// observation can reduce database contention, by not blocking + /// database writes when fresh values are fetched. It can also avoid + /// fetching fresh values from the main thread, after the database was + /// modified on the main thread. /// - /// The optimization is only applied when the observation is started from a - /// ``DatabasePool``. You can start such an observation from a - /// ``DatabaseQueue``, but the optimization will not be applied. + /// Those scheduling optimizations are only applied when the observation + /// is started from a ``DatabasePool``. You can start such an + /// observation from a ``DatabaseQueue``, but the optimizations will not + /// be applied. The notified values will be the same, though. This makes + /// it possible to use a pool in the main application, and an in-memory + /// queue in tests and Xcode previews. /// /// **Precondition**: The `fetch` function must perform requests that fetch /// from a single and constant database region. 
This region is made of @@ -960,7 +629,7 @@ extension ValueObservation { /// /// // Tracks the 'score' column in the 'player' table /// let observation = ValueObservation.trackingConstantRegion { db -> Int? in - /// try Player.select(max(Column("score"))).fetchOne(db) + /// try Int.fetchOne(db, sql: "SELECT MAX(score) FROM player") /// } /// /// // Tracks both the 'player' and 'team' tables @@ -971,73 +640,93 @@ extension ValueObservation { /// } /// ``` /// - /// Observations that do not track a constant region must not use this - /// method. Use ``tracking(_:)`` instead, or else some changes will not - /// be notified. + /// **Observations that do not track a constant database region must not + /// use this method, because some changes may not be notified to + /// the application.** /// - /// For example, the observations below do not track a constant region, and - /// must not be optimized: + /// For example, the observations below do not track a constant region. + /// They are correctly defined with ``tracking(_:)``, since + /// `trackingConstantRegion(_:)` is unsuited: /// /// ```swift - /// // Does not always track the same row in the player table. - /// let observation = ValueObservation.tracking { db -> Player? in - /// let pref = try Preference.fetchOne(db) ?? .default - /// return try Player.fetchOne(db, id: pref.favoritePlayerId) + /// // Does not always track the same row in the 'player' table: + /// let observation = ValueObservation.tracking { db -> Player in + /// let config = try AppConfiguration.find(db) + /// let playerId: Int64 = config.favoritePlayerId + /// return try Player.find(db, id: playerId) /// } /// - /// // Does not always track the 'user' table. - /// let observation = ValueObservation.tracking { db -> [User] in - /// let pref = try Preference.fetchOne(db) ?? .default - /// let playerIds: [Int64] = pref.favoritePlayerIds // may be empty + /// // Does not always track the 'player' table, or not always the same + /// // rows in the 'player' table: + /// let observation = ValueObservation.tracking { db -> [Player] in + /// let config = try AppConfiguration.find(db) + /// let playerIds: [Int64] = config.favoritePlayerIds + /// // Not only playerIds can change, but when it is empty, + /// // the player table is not tracked at all. /// return try Player.fetchAll(db, ids: playerIds) /// } /// /// // Sometimes tracks the 'food' table, and sometimes the 'beverage' table. /// let observation = ValueObservation.tracking { db -> Int in - /// let pref = try Preference.fetchOne(db) ?? .default - /// switch pref.selection { - /// case .food: return try Food.fetchCount(db) - /// case .beverage: return try Beverage.fetchCount(db) + /// let config = try AppConfiguration.find(db) + /// switch config.selection { + /// case .food: + /// return try Food.fetchCount(db) + /// case .beverage: + /// return try Beverage.fetchCount(db) /// } /// } /// ``` /// - /// You can turn them into optimized observations of a constant region with - /// the ``Database/registerAccess(to:)`` method: - /// - /// ```swift - /// let observation = ValueObservation.trackingConstantRegion { db -> Player? in - /// // Track all players so that the observed region does not depend on - /// // the rowid of the favorite player. - /// try db.registerAccess(to: Player.all()) - /// - /// let pref = try Preference.fetchOne(db) ?? 
-    ///     return try Player.fetchOne(db, id: pref.favoritePlayerId)
-    /// }
-    ///
-    /// let observation = ValueObservation.trackingConstantRegion { db -> [User] in
-    ///     // Track all players so that the observed region does not change
-    ///     // even if there is no favorite player at all.
-    ///     try db.registerAccess(to: Player.all())
-    ///
-    ///     let pref = try Preference.fetchOne(db) ?? .default
-    ///     let playerIds: [Int64] = pref.favoritePlayerIds // may be empty
-    ///     return try Player.fetchAll(db, ids: playerIds)
-    /// }
-    ///
-    /// let observation = ValueObservation.trackingConstantRegion { db -> Int in
-    ///     // Track foods and beverages so that the observed region does not
-    ///     // depend on preferences.
-    ///     try db.registerAccess(to: Food.all())
-    ///     try db.registerAccess(to: Beverage.all())
-    ///
-    ///     let pref = try Preference.fetchOne(db) ?? .default
-    ///     switch pref.selection {
-    ///     case .food: return try Food.fetchCount(db)
-    ///     case .beverage: return try Beverage.fetchCount(db)
+    /// Since only observations of a constant region can achieve important
+    /// scheduling optimizations (such as the guarantee, described above, that
+    /// fresh values are never fetched from the main thread), you can
+    /// always create one:
+    ///
+    /// - With ``tracking(regions:fetch:)``, you provide all tracked
+    ///   region(s) when the observation is created:
+    ///
+    ///     ```swift
+    ///     // Optimized observation that explicitly tracks the
+    ///     // 'appConfiguration', 'food', and 'beverage' tables:
+    ///     let observation = ValueObservation.tracking(
+    ///         regions: [
+    ///             AppConfiguration.all(),
+    ///             Food.all(),
+    ///             Beverage.all(),
+    ///         ],
+    ///         fetch: { db -> Int in
+    ///             let config = try AppConfiguration.find(db)
+    ///             switch config.selection {
+    ///             case .food:
+    ///                 return try Food.fetchCount(db)
+    ///             case .beverage:
+    ///                 return try Beverage.fetchCount(db)
+    ///             }
+    ///         })
+    ///     ```
+    ///
+    /// - With ``Database/registerAccess(to:)``, you extend the list of
+    ///   tracked region(s) from the fetching closure:
+    ///
+    ///     ```swift
+    ///     // Optimized observation that implicitly tracks the
+    ///     // 'appConfiguration' table, and explicitly tracks 'food'
+    ///     // and 'beverage':
+    ///     let observation = ValueObservation.trackingConstantRegion { db -> Int in
+    ///         try db.registerAccess(to: Food.all())
+    ///         try db.registerAccess(to: Beverage.all())
+    ///
+    ///         let config = try AppConfiguration.find(db)
+    ///         switch config.selection {
+    ///         case .food:
+    ///             return try Food.fetchCount(db)
+    ///         case .beverage:
+    ///             return try Beverage.fetchCount(db)
+    ///         }
     ///     }
-    /// }
-    /// ```
+    ///     ```
     ///
     /// - parameter fetch: The closure that fetches the observed value.
     public static func trackingConstantRegion<Value>(
@@ -1060,50 +749,66 @@ extension ValueObservation {
     ///
     /// ```swift
     /// // Tracks the full database
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: .fullDatabase,
     ///     fetch: { db in ... })
     ///
     /// // Tracks the full 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: Player.all(),
     ///     fetch: { db in ... })
     ///
     /// // Tracks the full 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: Table("player"),
     ///     fetch: { db in ... })
     ///
     /// // Tracks the row with id 42 in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: Player.filter(id: 42),
     ///     fetch: { db in ... })
     ///
     /// // Tracks the 'score' column in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: Player.select(Column("score")),
     ///     fetch: { db in ... })
     ///
     /// // Tracks the 'score' column in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: SQLRequest("SELECT score FROM player"),
     ///     fetch: { db in ... })
     ///
     /// // Tracks both the 'player' and 'team' tables
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     region: Player.all(), Team.all(),
     ///     fetch: { db in ... })
     /// ```
     ///
-    /// - parameter region: A list of observed regions.
+    /// Unlike observations created with ``tracking(_:)``, the returned
+    /// observation can reduce database contention, by not blocking
+    /// database writes when fresh values are fetched. It can also avoid
+    /// fetching fresh values from the main thread, after the database was
+    /// modified on the main thread.
+    ///
+    /// Those scheduling optimizations are only applied when the observation
+    /// is started from a ``DatabasePool``. You can start such an
+    /// observation from a ``DatabaseQueue``, but the optimizations will not
+    /// be applied. The notified values will be the same, though. This makes
+    /// it possible to use a pool in the main application, and an in-memory
+    /// queue in tests and Xcode previews.
+    ///
+    /// - parameter region: A region to observe.
+    /// - parameter otherRegions: A list of supplementary regions
+    ///   to observe.
     /// - parameter fetch: The closure that fetches the observed value.
     public static func tracking<Value>(
-        region: any DatabaseRegionConvertible...,
+        region: any DatabaseRegionConvertible,
+        _ otherRegions: any DatabaseRegionConvertible...,
         fetch: @escaping (Database) throws -> Value)
     -> Self
     where Reducer == ValueReducers.Fetch<Value>
     {
-        tracking(regions: region, fetch: fetch)
+        tracking(regions: [region] + otherRegions, fetch: fetch)
     }
 
     /// Creates a `ValueObservation` that notifies the fetched value whenever
@@ -1116,41 +821,54 @@ extension ValueObservation {
     ///
     /// ```swift
     /// // Tracks the full database
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [.fullDatabase],
     ///     fetch: { db in ... })
     ///
     /// // Tracks the full 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [Player.all()],
     ///     fetch: { db in ... })
     ///
     /// // Tracks the full 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [Table("player")],
     ///     fetch: { db in ... })
     ///
     /// // Tracks the row with id 42 in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [Player.filter(id: 42)],
     ///     fetch: { db in ... })
     ///
     /// // Tracks the 'score' column in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [Player.select(Column("score"))],
     ///     fetch: { db in ... })
     ///
     /// // Tracks the 'score' column in the 'player' table
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [SQLRequest("SELECT score FROM player")],
     ///     fetch: { db in ... })
     ///
     /// // Tracks both the 'player' and 'team' tables
-    /// let observation = ValueObservation.tracking
+    /// let observation = ValueObservation.tracking(
     ///     regions: [Player.all(), Team.all()],
     ///     fetch: { db in ... })
     /// ```
     ///
+    /// Unlike observations created with ``tracking(_:)``, the returned
+    /// observation can reduce database contention, by not blocking
+    /// database writes when fresh values are fetched. It can also avoid
+    /// fetching fresh values from the main thread, after the database was
+    /// modified on the main thread.
+    ///
+    /// Those scheduling optimizations are only applied when the observation
+    /// is started from a ``DatabasePool``. You can start such an
+    /// observation from a ``DatabaseQueue``, but the optimizations will not
+    /// be applied. The notified values will be the same, though. This makes
+    /// it possible to use a pool in the main application, and an in-memory
+    /// queue in tests and Xcode previews.
+    ///
     /// - parameter regions: An array of observed regions.
     /// - parameter fetch: The closure that fetches the observed value.
     public static func tracking<Value>(
diff --git a/GRDB/ValueObservation/ValueObservationScheduler.swift b/GRDB/ValueObservation/ValueObservationScheduler.swift
index 6ca3447cf9..d43cad7dc3 100644
--- a/GRDB/ValueObservation/ValueObservationScheduler.swift
+++ b/GRDB/ValueObservation/ValueObservationScheduler.swift
@@ -66,6 +66,10 @@ extension ValueObservationScheduler where Self == AsyncValueObservationScheduler
     ///         print("fresh players: \(players)")
     ///     })
     /// ```
+    ///
+    /// - warning: Make sure you provide a serial queue, because a
+    ///   concurrent one such as `DispatchQueue.global(qos: .default)` would
+    ///   mess with the ordering of fresh value notifications.
     public static func async(onQueue queue: DispatchQueue) -> AsyncValueObservationScheduler {
         AsyncValueObservationScheduler(queue: queue)
     }
@@ -76,7 +80,7 @@ extension ValueObservationScheduler where Self == AsyncValueObservationScheduler
 /// A scheduler that notifies all values on the main `DispatchQueue`. The
 /// first value is immediately notified when the `ValueObservation`
 /// is started.
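Editor's note: the pool-versus-queue behavior documented in the rewritten `trackingConstantRegion` comments above can be illustrated with a short sketch. It is not part of the diff; the `player` table, its `score` column, and the database path are assumptions:

```swift
import GRDB

// The same constant-region observation works with both reader types.
// (Illustrative sketch; assumes a 'player' table with a 'score' column.)
let observation = ValueObservation.trackingConstantRegion { db in
    try Int.fetchOne(db, sql: "SELECT MAX(score) FROM player")
}

// In the application: starting from a DatabasePool applies the scheduling
// optimizations (writes are not blocked while fresh values are fetched).
let pool = try DatabasePool(path: "/path/to/database.sqlite") // hypothetical path
let appCancellable = observation.start(
    in: pool,
    onError: { print("observation failed: \($0)") },
    onChange: { maxScore in print("fresh max score: \(String(describing: maxScore))") })

// In tests and Xcode previews: an in-memory DatabaseQueue notifies the
// same values, just without the optimizations.
let queue = try DatabaseQueue() // in-memory
try queue.write { db in
    try db.execute(sql: "CREATE TABLE player(id INTEGER PRIMARY KEY, score INTEGER)")
}
let testCancellable = observation.start(
    in: queue,
    onError: { print("observation failed: \($0)") },
    onChange: { maxScore in print("fresh max score: \(String(describing: maxScore))") })
```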
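And a companion sketch (again an editor's illustration, not diff content) for the new serial-queue warning on `.async(onQueue:)`: `DispatchQueue(label:)` creates a serial queue by default, which preserves the order of fresh value notifications; a concurrent queue could deliver them out of order.

```swift
import GRDB

// Serial queue: fresh values arrive in the order they were fetched.
let serialQueue = DispatchQueue(label: "com.example.player-observation")

let dbQueue = try DatabaseQueue() // in-memory database, for illustration
try dbQueue.write { db in
    try db.execute(sql: "CREATE TABLE player(id INTEGER PRIMARY KEY, score INTEGER)")
}

let observation = ValueObservation.tracking { db in
    try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM player") ?? 0
}

let cancellable = observation.start(
    in: dbQueue,
    scheduling: .async(onQueue: serialQueue),
    onError: { print("observation failed: \($0)") },
    onChange: { count in print("fresh player count: \(count)") })
```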
-public struct ImmediateValueObservationScheduler: ValueObservationScheduler { +public struct ImmediateValueObservationScheduler: ValueObservationScheduler, Sendable { public init() { } public func immediateInitialValue() -> Bool { diff --git a/GRDBCustom.xcodeproj/project.pbxproj b/GRDBCustom.xcodeproj/project.pbxproj index 3b15a8f999..253e90b00e 100755 --- a/GRDBCustom.xcodeproj/project.pbxproj +++ b/GRDBCustom.xcodeproj/project.pbxproj @@ -13,6 +13,10 @@ 56012B82257404A400B4925B /* CommonTableExpression.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56012B80257404A300B4925B /* CommonTableExpression.swift */; }; 560233CF2724339A00529DF3 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560233CC2724339A00529DF3 /* SharedValueObservationTests.swift */; }; 560233D127243A9200529DF3 /* SharedValueObservation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560233D027243A9100529DF3 /* SharedValueObservation.swift */; }; + 5603CEC42AC862F800CF097D /* SQLJSONFunctions.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEBF2AC862F800CF097D /* SQLJSONFunctions.swift */; }; + 5603CEC52AC862F800CF097D /* SQLJSONExpressible.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEC02AC862F800CF097D /* SQLJSONExpressible.swift */; }; + 5603CEC62AC862F800CF097D /* JSONColumn.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CEC12AC862F800CF097D /* JSONColumn.swift */; }; + 5603CED52AC8643800CF097D /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CECC2AC8633B00CF097D /* JSONExpressionsTests.swift */; }; 56043296228F00A9009D3FE2 /* OrderedDictionaryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56043295228F00A9009D3FE2 /* OrderedDictionaryTests.swift */; }; 560432A6228F167A009D3FE2 /* AssociationPrefetchingObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 560432A5228F167A009D3FE2 /* AssociationPrefetchingObservationTests.swift */; }; 5604484E25DEEF7C002BAA79 /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5604484C25DEEF7C002BAA79 /* AssociationPrefetchingRelationTests.swift */; }; @@ -33,6 +37,9 @@ 561CFA7D2373503D000C8BAA /* TableRecordUpdateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFA7B2373503D000C8BAA /* TableRecordUpdateTests.swift */; }; 561CFAA12376EF4F000C8BAA /* AssociationHasManyOrderingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFA9F2376EF4F000C8BAA /* AssociationHasManyOrderingTests.swift */; }; 561CFAA42376EF59000C8BAA /* AssociationHasManyThroughOrderingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561CFAA32376EF59000C8BAA /* AssociationHasManyThroughOrderingTests.swift */; }; + 561F38DB2AC8914D0051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38DA2AC8914D0051EEE9 /* JSONColumnTests.swift */; }; + 561F38F22AC9CE220051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F02AC9CE220051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 561F38F62AC9CE5A0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F52AC9CE5A0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; 562205FA1E420E49005860AC /* DatabasePoolReleaseMemoryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563363CF1C943D13000BE133 /* DatabasePoolReleaseMemoryTests.swift */; }; 562205FB1E420E49005860AC /* DatabasePoolSchemaCacheTests.swift in Sources */ = {isa = 
PBXBuildFile; fileRef = 569531281C908A5B00CF1A2B /* DatabasePoolSchemaCacheTests.swift */; }; 562205FC1E420E49005860AC /* DatabaseQueueReleaseMemoryTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563363D41C94484E000BE133 /* DatabaseQueueReleaseMemoryTests.swift */; }; @@ -48,8 +55,11 @@ 562393631DEE06D300A6B01F /* CursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623935F1DEE06D300A6B01F /* CursorTests.swift */; }; 5623936C1DEE0CD200A6B01F /* FlattenCursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562393681DEE0CD200A6B01F /* FlattenCursorTests.swift */; }; 562393751DEE104400A6B01F /* MapCursorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562393711DEE104400A6B01F /* MapCursorTests.swift */; }; + 5623B6192AED39C300436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6162AED39C200436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; + 5623B61A2AED39C300436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6182AED39C200436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; 56256EDE25D1BC07008C2BDD /* ForeignKey.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56256EDD25D1BC07008C2BDD /* ForeignKey.swift */; }; 562756461E963AAC0035B653 /* DatabaseWriterTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562756421E963AAC0035B653 /* DatabaseWriterTests.swift */; }; + 562B58CE2A29BC0700E8C75D /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 562B58CC2A29BC0700E8C75D /* Issue1383.sqlite */; }; 562EA8291F17B2AC00FA528C /* CompilationProtocolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562EA8251F17B2AC00FA528C /* CompilationProtocolTests.swift */; }; 562EA8321F17B9EB00FA528C /* CompilationSubClassTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 562EA82E1F17B9EB00FA528C /* CompilationSubClassTests.swift */; }; 563082EA2430B6CD00C14A05 /* DatabaseCancellable.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563082E82430B6CD00C14A05 /* DatabaseCancellable.swift */; }; @@ -68,6 +78,7 @@ 563B8FBB24A1D036007A48C9 /* ReceiveValuesOn.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563B8FB924A1D036007A48C9 /* ReceiveValuesOn.swift */; }; 563B8FBD24A1D388007A48C9 /* OnDemandFuture.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563B8FBC24A1D388007A48C9 /* OnDemandFuture.swift */; }; 563C67B824628C0C00E94EDC /* DatabasePoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563C67B624628C0C00E94EDC /* DatabasePoolTests.swift */; }; + 563CBBE42A595141008905CE /* SQLIndexGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563CBBE22A595141008905CE /* SQLIndexGenerator.swift */; }; 563DE4F8231A91F6005081B7 /* DatabaseConfigurationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563DE4F6231A91F6005081B7 /* DatabaseConfigurationTests.swift */; }; 563EF420215F8A76007DAACD /* OrderedDictionary.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563EF41E215F8A76007DAACD /* OrderedDictionary.swift */; }; 563EF442216131F5007DAACD /* AssociationAggregateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 563EF441216131F5007DAACD /* AssociationAggregateTests.swift */; }; @@ -89,6 +100,7 @@ 56419C8F24A51D7D004967E1 /* Map.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A7F24A51614004967E1 /* Map.swift */; }; 56419C9024A51D7D004967E1 /* Inverted.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A8024A51614004967E1 /* Inverted.swift */; }; 56419C9124A51D7D004967E1 
/* PublisherExpectation.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419A8124A51614004967E1 /* PublisherExpectation.swift */; }; + 5642A31B2AD66E0C0065F717 /* LineDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5642A3192AD66E0C0065F717 /* LineDumpFormat.swift */; }; 564448861EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 564448821EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift */; }; 5644DE7F20F8D1D1001FFDDE /* DatabaseValueConversionErrorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5644DE7E20F8D1D1001FFDDE /* DatabaseValueConversionErrorTests.swift */; }; 564B3D72239BDBD6007BF308 /* DatabaseSuspensionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 564B3D70239BDBD6007BF308 /* DatabaseSuspensionTests.swift */; }; @@ -192,13 +204,24 @@ 5676FBAA22F5CEB9004717D9 /* ValueObservationRegionRecordingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5676FBA822F5CEB8004717D9 /* ValueObservationRegionRecordingTests.swift */; }; 56781B08243F7B5300650A83 /* GRDB-Bridging.h in Headers */ = {isa = PBXBuildFile; fileRef = 56781B06243F7B4B00650A83 /* GRDB-Bridging.h */; settings = {ATTRIBUTES = (Public, ); }; }; 567A80561D41350C00C7DCEC /* IndexInfoTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567A80521D41350C00C7DCEC /* IndexInfoTests.swift */; }; + 567B5BF62AD3285100629622 /* DatabaseReader+dump.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BEE2AD3285100629622 /* DatabaseReader+dump.swift */; }; + 567B5BF72AD3285100629622 /* DumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BEF2AD3285100629622 /* DumpFormat.swift */; }; + 567B5BF82AD3285100629622 /* QuoteDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BF12AD3285100629622 /* QuoteDumpFormat.swift */; }; + 567B5BF92AD3285100629622 /* JSONDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BF22AD3285100629622 /* JSONDumpFormat.swift */; }; + 567B5BFA2AD3285100629622 /* DebugDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BF32AD3285100629622 /* DebugDumpFormat.swift */; }; + 567B5BFB2AD3285100629622 /* Database+Dump.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BF42AD3285100629622 /* Database+Dump.swift */; }; + 567B5C042AD328D900629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C022AD328D900629622 /* DatabaseColumnEncodingStrategyTests.swift */; }; + 567B5C562AD3301800629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BFD2AD3287800629622 /* DatabaseDumpTests.swift */; }; + 567B5C572AD3301800629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5BFE2AD3287800629622 /* DatabaseReaderDumpTests.swift */; }; 567DAF381EAB789800FC0928 /* DatabaseLogErrorTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567DAF341EAB789800FC0928 /* DatabaseLogErrorTests.swift */; }; 567E420A242AB3CB00CAAD2C /* FailureTestCase.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567E4207242AB3CB00CAAD2C /* FailureTestCase.swift */; }; 567F45AB1F888B2600030B59 /* TruncateOptimizationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567F45A71F888B2600030B59 /* TruncateOptimizationTests.swift */; }; 568068341EBBA26100EFB8AA /* SQLRequestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */; }; + 5685C1962AD52EF200DA4B7A /* 
ListDumpFormat.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5685C1942AD52EF200DA4B7A /* ListDumpFormat.swift */; }; 56894F332604FC1E00268F4D /* Table.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56894F322604FC1E00268F4D /* Table.swift */; }; 56894FE3260658A400268F4D /* Decimal.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56894FA0260657F600268F4D /* Decimal.swift */; }; 56894FF2260658E600268F4D /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56894FF1260658E600268F4D /* FoundationDecimalTests.swift */; }; + 568C3F7D2A5AB2D500A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F7B2A5AB2D500A2309D /* ForeignKeyDefinitionTests.swift */; }; 568EB71C2921232200E59445 /* DatabaseSnapshotPool.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568EB71A2921232200E59445 /* DatabaseSnapshotPool.swift */; }; 568EB7202921235E00E59445 /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568EB71F2921235E00E59445 /* DatabaseSnapshotPoolTests.swift */; }; 568ECB0D25D904CA00B71526 /* SQLSelection.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568ECB0C25D904CA00B71526 /* SQLSelection.swift */; }; @@ -240,8 +263,11 @@ 56A8C2461D1918EF0096E9D4 /* FoundationUUIDTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56A8C21E1D1914110096E9D4 /* FoundationUUIDTests.swift */; }; 56AE6428222AACE300AD1B0B /* AssociationHasOneThroughSQLTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AE6426222AACE300AD1B0B /* AssociationHasOneThroughSQLTests.swift */; }; 56AF746E1D41FB9C005E9FF3 /* DatabaseValueConvertibleEscapingTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AF746A1D41FB9C005E9FF3 /* DatabaseValueConvertibleEscapingTests.swift */; }; + 56AFEF3229969F7E00CA1E51 /* TransactionClock.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AFEF3029969F7E00CA1E51 /* TransactionClock.swift */; }; + 56AFEF3A2996B9EE00CA1E51 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56AFEF382996B9EE00CA1E51 /* TransactionDateTests.swift */; }; 56B021CC1D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B021C81D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift */; }; 56B14E821D4DAE54000BF4A3 /* RowFromDictionaryLiteralTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B14E7E1D4DAE54000BF4A3 /* RowFromDictionaryLiteralTests.swift */; }; + 56B6AB092BD3DCE0009A0B71 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6AB072BD3DCE0009A0B71 /* SingletonUserDefaultsTest.swift */; }; 56B6EF60208CB746002F0ACB /* ColumnExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6EF5E208CB746002F0ACB /* ColumnExpressionTests.swift */; }; 56B86E70220FF4C900524C16 /* SQLLiteralTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B86E6E220FF4C800524C16 /* SQLLiteralTests.swift */; }; 56B9649F1DA51B4C0002DA19 /* FTS5.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B9649C1DA51B4C0002DA19 /* FTS5.swift */; }; @@ -274,6 +300,7 @@ 56D110F528AFC90800E64463 /* MutablePersistableRecord+Save.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110E728AFC90800E64463 /* MutablePersistableRecord+Save.swift */; }; 56D110F728AFC90800E64463 /* MutablePersistableRecord+Update.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110E828AFC90800E64463 /* MutablePersistableRecord+Update.swift */; }; 
56D110FF28AFC9C600E64463 /* MutablePersistableRecord+DAO.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D110FE28AFC9C600E64463 /* MutablePersistableRecord+DAO.swift */; }; + 56D3332329C38D7B00430680 /* WALSnapshotTransaction.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D3332129C38D7A00430680 /* WALSnapshotTransaction.swift */; }; 56D507611F6BAE8600AE1C5B /* PrimaryKeyInfoTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D5075D1F6BAE8600AE1C5B /* PrimaryKeyInfoTests.swift */; }; 56D51D021EA789FA0074638A /* FetchableRecord+TableRecord.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56D51CFF1EA789FA0074638A /* FetchableRecord+TableRecord.swift */; }; 56DA7D03260FAA1B00A8D97B /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56DA7D02260FAA1A00A8D97B /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */; }; @@ -290,6 +317,14 @@ 56F34FC624B0A0C9007513FC /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F34FC524B0A0C8007513FC /* SQLIdentifyingColumnsTests.swift */; }; 56F3E74C1E66F83A00BF0F01 /* ResultCodeTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F3E7481E66F83A00BF0F01 /* ResultCodeTests.swift */; }; 56F61DEA283D469F00AF9884 /* getThreadsCount.c in Sources */ = {isa = PBXBuildFile; fileRef = 56F61DE8283D469F00AF9884 /* getThreadsCount.c */; }; + 56F89DFA2A57EAB9002FE2AA /* ColumnDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89DF82A57EAB9002FE2AA /* ColumnDefinition.swift */; }; + 56F89E072A57EBA7002FE2AA /* TableAlteration.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E032A57EBA7002FE2AA /* TableAlteration.swift */; }; + 56F89E082A57EBA7002FE2AA /* Database+SchemaDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E042A57EBA7002FE2AA /* Database+SchemaDefinition.swift */; }; + 56F89E092A57EBA7002FE2AA /* ForeignKeyDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E052A57EBA7002FE2AA /* ForeignKeyDefinition.swift */; }; + 56F89E0A2A57EBA7002FE2AA /* IndexDefinition.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E062A57EBA7002FE2AA /* IndexDefinition.swift */; }; + 56F89E1A2A585E0D002FE2AA /* SQLColumnGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E162A585E0D002FE2AA /* SQLColumnGenerator.swift */; }; + 56F89E1B2A585E0D002FE2AA /* SQLTableAlterationGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E182A585E0D002FE2AA /* SQLTableAlterationGenerator.swift */; }; + 56F89E1C2A585E0D002FE2AA /* SQLTableGenerator.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56F89E192A585E0D002FE2AA /* SQLTableGenerator.swift */; }; 56FA0C3728B1F2EB00B2DFF7 /* MutablePersistableRecord+Upsert.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FA0C3428B1F2EB00B2DFF7 /* MutablePersistableRecord+Upsert.swift */; }; 56FA0C4128B20ADB00B2DFF7 /* PersistableRecord+Upsert.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FA0C4028B20ADB00B2DFF7 /* PersistableRecord+Upsert.swift */; }; 56FBFED62210731100945324 /* SQLRequest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FBFED52210731000945324 /* SQLRequest.swift */; }; @@ -298,6 +333,7 @@ 56FF45431D2C23BA00F21EF9 /* TableRecordDeleteTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FF453F1D2C23BA00F21EF9 /* TableRecordDeleteTests.swift */; }; 56FF45591D2CDA5200F21EF9 /* RecordUniqueIndexTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56FF45551D2CDA5200F21EF9 /* 
RecordUniqueIndexTests.swift */; }; 6340BF831E5E3F7900832805 /* RecordPersistenceConflictPolicy.swift in Sources */ = {isa = PBXBuildFile; fileRef = 6340BF7F1E5E3F7900832805 /* RecordPersistenceConflictPolicy.swift */; }; + 648704BA2B8261070036480B /* PrivacyInfo.xcprivacy in Resources */ = {isa = PBXBuildFile; fileRef = 648704B82B8261070036480B /* PrivacyInfo.xcprivacy */; }; EED476F21CFD17270026A4EC /* GRDBCustomSQLite-USER.h in Headers */ = {isa = PBXBuildFile; fileRef = EED476F11CFD16FF0026A4EC /* GRDBCustomSQLite-USER.h */; settings = {ATTRIBUTES = (Public, ); }; }; F3BA80661CFB2E55003DC1BA /* Configuration.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56A238701B9C75030082EB20 /* Configuration.swift */; }; F3BA80671CFB2E55003DC1BA /* Database.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56A238711B9C75030082EB20 /* Database.swift */; }; @@ -420,6 +456,10 @@ 56012B80257404A300B4925B /* CommonTableExpression.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = CommonTableExpression.swift; sourceTree = ""; }; 560233CC2724339A00529DF3 /* SharedValueObservationTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SharedValueObservationTests.swift; sourceTree = ""; }; 560233D027243A9100529DF3 /* SharedValueObservation.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SharedValueObservation.swift; sourceTree = ""; }; + 5603CEBF2AC862F800CF097D /* SQLJSONFunctions.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLJSONFunctions.swift; sourceTree = ""; }; + 5603CEC02AC862F800CF097D /* SQLJSONExpressible.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLJSONExpressible.swift; sourceTree = ""; }; + 5603CEC12AC862F800CF097D /* JSONColumn.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONColumn.swift; sourceTree = ""; }; + 5603CECC2AC8633B00CF097D /* JSONExpressionsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONExpressionsTests.swift; sourceTree = ""; }; 56043295228F00A9009D3FE2 /* OrderedDictionaryTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = OrderedDictionaryTests.swift; sourceTree = ""; }; 560432A5228F167A009D3FE2 /* AssociationPrefetchingObservationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingObservationTests.swift; sourceTree = ""; }; 5604484C25DEEF7C002BAA79 /* AssociationPrefetchingRelationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingRelationTests.swift; sourceTree = ""; }; @@ -456,6 +496,9 @@ 561CFA7B2373503D000C8BAA /* TableRecordUpdateTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableRecordUpdateTests.swift; sourceTree = ""; }; 561CFA9F2376EF4F000C8BAA /* AssociationHasManyOrderingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasManyOrderingTests.swift; sourceTree = ""; }; 561CFAA32376EF59000C8BAA /* AssociationHasManyThroughOrderingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasManyThroughOrderingTests.swift; sourceTree = ""; }; + 
561F38DA2AC8914D0051EEE9 /* JSONColumnTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONColumnTests.swift; sourceTree = ""; }; + 561F38F02AC9CE220051EEE9 /* DatabaseDataEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataEncodingStrategyTests.swift; sourceTree = ""; }; + 561F38F52AC9CE5A0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataDecodingStrategyTests.swift; sourceTree = ""; }; 56231E6025CEBF06001DFD2F /* RowDecodingError.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = RowDecodingError.swift; sourceTree = ""; }; 562393171DECC02000A6B01F /* RowFetchTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RowFetchTests.swift; sourceTree = ""; }; 5623932F1DEDFC5700A6B01F /* AnyCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AnyCursorTests.swift; sourceTree = ""; }; @@ -464,9 +507,12 @@ 5623935F1DEE06D300A6B01F /* CursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CursorTests.swift; sourceTree = ""; }; 562393681DEE0CD200A6B01F /* FlattenCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FlattenCursorTests.swift; sourceTree = ""; }; 562393711DEE104400A6B01F /* MapCursorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MapCursorTests.swift; sourceTree = ""; }; + 5623B6162AED39C200436239 /* DatabaseQueueInMemoryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueInMemoryCopyTests.swift; sourceTree = ""; }; + 5623B6182AED39C200436239 /* DatabaseQueueTemporaryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueTemporaryCopyTests.swift; sourceTree = ""; }; 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GRDBTestCase.swift; sourceTree = ""; }; 56256EDD25D1BC07008C2BDD /* ForeignKey.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ForeignKey.swift; sourceTree = ""; }; 562756421E963AAC0035B653 /* DatabaseWriterTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseWriterTests.swift; sourceTree = ""; }; + 562B58CC2A29BC0700E8C75D /* Issue1383.sqlite */ = {isa = PBXFileReference; lastKnownFileType = file; path = Issue1383.sqlite; sourceTree = ""; }; 562EA8251F17B2AC00FA528C /* CompilationProtocolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CompilationProtocolTests.swift; sourceTree = ""; }; 562EA82E1F17B9EB00FA528C /* CompilationSubClassTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CompilationSubClassTests.swift; sourceTree = ""; }; 56300B5D1C53C38F005A543B /* QueryInterfaceRequestTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = QueryInterfaceRequestTests.swift; sourceTree = ""; }; @@ -498,6 +544,7 @@ 563B8FB924A1D036007A48C9 /* 
ReceiveValuesOn.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ReceiveValuesOn.swift; sourceTree = ""; }; 563B8FBC24A1D388007A48C9 /* OnDemandFuture.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OnDemandFuture.swift; sourceTree = ""; }; 563C67B624628C0C00E94EDC /* DatabasePoolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabasePoolTests.swift; sourceTree = ""; }; + 563CBBE22A595141008905CE /* SQLIndexGenerator.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLIndexGenerator.swift; sourceTree = ""; }; 563DE4F6231A91F6005081B7 /* DatabaseConfigurationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseConfigurationTests.swift; sourceTree = ""; }; 563EF41E215F8A76007DAACD /* OrderedDictionary.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = OrderedDictionary.swift; sourceTree = ""; }; 563EF441216131F5007DAACD /* AssociationAggregateTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationAggregateTests.swift; sourceTree = ""; }; @@ -519,7 +566,7 @@ 56419A7F24A51614004967E1 /* Map.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Map.swift; sourceTree = ""; }; 56419A8024A51614004967E1 /* Inverted.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = Inverted.swift; sourceTree = ""; }; 56419A8124A51614004967E1 /* PublisherExpectation.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PublisherExpectation.swift; sourceTree = ""; }; - 5643676E272EDF3700C718C7 /* Scripts */ = {isa = PBXFileReference; lastKnownFileType = folder; path = Scripts; sourceTree = ""; }; + 5642A3192AD66E0C0065F717 /* LineDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = LineDumpFormat.swift; sourceTree = ""; }; 564448821EF56B1B00DD2861 /* DatabaseAfterNextTransactionCommitTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseAfterNextTransactionCommitTests.swift; sourceTree = ""; }; 5644DE7E20F8D1D1001FFDDE /* DatabaseValueConversionErrorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseValueConversionErrorTests.swift; sourceTree = ""; }; 564A50C61BFF4B7F00B3A3A2 /* DatabaseCollationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseCollationTests.swift; sourceTree = ""; }; @@ -633,14 +680,25 @@ 5676FBA822F5CEB8004717D9 /* ValueObservationRegionRecordingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ValueObservationRegionRecordingTests.swift; sourceTree = ""; }; 56781B06243F7B4B00650A83 /* GRDB-Bridging.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = "GRDB-Bridging.h"; path = "SQLiteCustom/GRDB-Bridging.h"; sourceTree = ""; }; 567A80521D41350C00C7DCEC /* IndexInfoTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = IndexInfoTests.swift; sourceTree = ""; }; + 567B5BEE2AD3285100629622 /* DatabaseReader+dump.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; 
path = "DatabaseReader+dump.swift"; sourceTree = ""; }; + 567B5BEF2AD3285100629622 /* DumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DumpFormat.swift; sourceTree = ""; }; + 567B5BF12AD3285100629622 /* QuoteDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = QuoteDumpFormat.swift; sourceTree = ""; }; + 567B5BF22AD3285100629622 /* JSONDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONDumpFormat.swift; sourceTree = ""; }; + 567B5BF32AD3285100629622 /* DebugDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DebugDumpFormat.swift; sourceTree = ""; }; + 567B5BF42AD3285100629622 /* Database+Dump.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "Database+Dump.swift"; sourceTree = ""; }; + 567B5BFD2AD3287800629622 /* DatabaseDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDumpTests.swift; sourceTree = ""; }; + 567B5BFE2AD3287800629622 /* DatabaseReaderDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseReaderDumpTests.swift; sourceTree = ""; }; + 567B5C022AD328D900629622 /* DatabaseColumnEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseColumnEncodingStrategyTests.swift; sourceTree = ""; }; 567DAF341EAB789800FC0928 /* DatabaseLogErrorTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseLogErrorTests.swift; sourceTree = ""; }; 567E4207242AB3CB00CAAD2C /* FailureTestCase.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = FailureTestCase.swift; sourceTree = ""; }; 567F45A71F888B2600030B59 /* TruncateOptimizationTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TruncateOptimizationTests.swift; sourceTree = ""; }; 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLRequestTests.swift; sourceTree = ""; }; + 5685C1942AD52EF200DA4B7A /* ListDumpFormat.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListDumpFormat.swift; sourceTree = ""; }; 5687359E1CEDE16C009B9116 /* Betty.jpeg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = Betty.jpeg; sourceTree = ""; }; 56894F322604FC1E00268F4D /* Table.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Table.swift; sourceTree = ""; }; 56894FA0260657F600268F4D /* Decimal.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Decimal.swift; sourceTree = ""; }; 56894FF1260658E600268F4D /* FoundationDecimalTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoundationDecimalTests.swift; sourceTree = ""; }; + 568C3F7B2A5AB2D500A2309D /* ForeignKeyDefinitionTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinitionTests.swift; sourceTree = ""; }; 568EB71A2921232200E59445 /* DatabaseSnapshotPool.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = 
DatabaseSnapshotPool.swift; sourceTree = ""; }; 568EB71F2921235E00E59445 /* DatabaseSnapshotPoolTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = DatabaseSnapshotPoolTests.swift; sourceTree = ""; }; 568ECB0C25D904CA00B71526 /* SQLSelection.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLSelection.swift; sourceTree = ""; }; @@ -722,8 +780,11 @@ 56A8C2361D1914790096E9D4 /* FoundationNSUUIDTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoundationNSUUIDTests.swift; sourceTree = ""; }; 56AE6426222AACE300AD1B0B /* AssociationHasOneThroughSQLTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationHasOneThroughSQLTests.swift; sourceTree = ""; }; 56AF746A1D41FB9C005E9FF3 /* DatabaseValueConvertibleEscapingTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseValueConvertibleEscapingTests.swift; sourceTree = ""; }; + 56AFEF3029969F7E00CA1E51 /* TransactionClock.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TransactionClock.swift; sourceTree = ""; }; + 56AFEF382996B9EE00CA1E51 /* TransactionDateTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = TransactionDateTests.swift; sourceTree = ""; }; 56B021C81D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MutablePersistableRecordPersistenceConflictPolicyTests.swift; sourceTree = ""; }; 56B14E7E1D4DAE54000BF4A3 /* RowFromDictionaryLiteralTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RowFromDictionaryLiteralTests.swift; sourceTree = ""; }; + 56B6AB072BD3DCE0009A0B71 /* SingletonUserDefaultsTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SingletonUserDefaultsTest.swift; sourceTree = ""; }; 56B6EF5E208CB746002F0ACB /* ColumnExpressionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ColumnExpressionTests.swift; sourceTree = ""; }; 56B7F4291BE14A1900E39BBF /* CGFloatTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CGFloatTests.swift; sourceTree = ""; }; 56B7F4391BEB42D500E39BBF /* Migration.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Migration.swift; sourceTree = ""; }; @@ -759,6 +820,7 @@ 56D110E728AFC90800E64463 /* MutablePersistableRecord+Save.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+Save.swift"; sourceTree = ""; }; 56D110E828AFC90800E64463 /* MutablePersistableRecord+Update.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+Update.swift"; sourceTree = ""; }; 56D110FE28AFC9C600E64463 /* MutablePersistableRecord+DAO.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+DAO.swift"; sourceTree = ""; }; + 56D3332129C38D7A00430680 /* WALSnapshotTransaction.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WALSnapshotTransaction.swift; sourceTree = ""; }; 56D5075D1F6BAE8600AE1C5B /* 
PrimaryKeyInfoTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = PrimaryKeyInfoTests.swift; sourceTree = ""; }; 56D51CFF1EA789FA0074638A /* FetchableRecord+TableRecord.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "FetchableRecord+TableRecord.swift"; sourceTree = ""; }; 56DA7D02260FAA1A00A8D97B /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordMinimalNonOptionalPrimaryKeySingleTests.swift; sourceTree = ""; }; @@ -783,6 +845,14 @@ 56F3E7481E66F83A00BF0F01 /* ResultCodeTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ResultCodeTests.swift; sourceTree = ""; }; 56F61DE8283D469F00AF9884 /* getThreadsCount.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = getThreadsCount.c; sourceTree = ""; }; 56F61DE9283D469F00AF9884 /* getThreadsCount.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = getThreadsCount.h; sourceTree = ""; }; + 56F89DF82A57EAB9002FE2AA /* ColumnDefinition.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ColumnDefinition.swift; sourceTree = ""; }; + 56F89E032A57EBA7002FE2AA /* TableAlteration.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableAlteration.swift; sourceTree = ""; }; + 56F89E042A57EBA7002FE2AA /* Database+SchemaDefinition.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "Database+SchemaDefinition.swift"; sourceTree = ""; }; + 56F89E052A57EBA7002FE2AA /* ForeignKeyDefinition.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinition.swift; sourceTree = ""; }; + 56F89E062A57EBA7002FE2AA /* IndexDefinition.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = IndexDefinition.swift; sourceTree = ""; }; + 56F89E162A585E0D002FE2AA /* SQLColumnGenerator.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLColumnGenerator.swift; sourceTree = ""; }; + 56F89E182A585E0D002FE2AA /* SQLTableAlterationGenerator.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLTableAlterationGenerator.swift; sourceTree = ""; }; + 56F89E192A585E0D002FE2AA /* SQLTableGenerator.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLTableGenerator.swift; sourceTree = ""; }; 56FA0C3428B1F2EB00B2DFF7 /* MutablePersistableRecord+Upsert.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "MutablePersistableRecord+Upsert.swift"; sourceTree = ""; }; 56FA0C4028B20ADB00B2DFF7 /* PersistableRecord+Upsert.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "PersistableRecord+Upsert.swift"; sourceTree = ""; }; 56FBFED52210731000945324 /* SQLRequest.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SQLRequest.swift; sourceTree = ""; }; @@ -792,6 +862,7 @@ 56FF453F1D2C23BA00F21EF9 /* TableRecordDeleteTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableRecordDeleteTests.swift; sourceTree = ""; }; 
56FF45551D2CDA5200F21EF9 /* RecordUniqueIndexTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordUniqueIndexTests.swift; sourceTree = ""; }; 6340BF7F1E5E3F7900832805 /* RecordPersistenceConflictPolicy.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordPersistenceConflictPolicy.swift; sourceTree = ""; }; + 648704B82B8261070036480B /* PrivacyInfo.xcprivacy */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xml; path = PrivacyInfo.xcprivacy; sourceTree = ""; }; DC2393C61ABE35F8003FF113 /* GRDB-Bridging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "GRDB-Bridging.h"; sourceTree = ""; }; DC3773F719C8CBB3004FCF85 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; DC3773F819C8CBB3004FCF85 /* GRDB.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GRDB.h; sourceTree = ""; }; @@ -828,6 +899,25 @@ /* End PBXFrameworksBuildPhase section */ /* Begin PBXGroup section */ + 5603CEBE2AC862F800CF097D /* JSON */ = { + isa = PBXGroup; + children = ( + 5603CEC12AC862F800CF097D /* JSONColumn.swift */, + 5603CEC02AC862F800CF097D /* SQLJSONExpressible.swift */, + 5603CEBF2AC862F800CF097D /* SQLJSONFunctions.swift */, + ); + path = JSON; + sourceTree = ""; + }; + 5603CECB2AC8632A00CF097D /* JSON */ = { + isa = PBXGroup; + children = ( + 561F38DA2AC8914D0051EEE9 /* JSONColumnTests.swift */, + 5603CECC2AC8633B00CF097D /* JSONExpressionsTests.swift */, + ); + name = JSON; + sourceTree = ""; + }; 5605F1471C672E4000235C62 /* Support */ = { isa = PBXGroup; children = ( @@ -906,6 +996,8 @@ 560B3FA41C19DFF800C58EC7 /* PersistableRecord */ = { isa = PBXGroup; children = ( + 567B5C022AD328D900629622 /* DatabaseColumnEncodingStrategyTests.swift */, + 561F38F02AC9CE220051EEE9 /* DatabaseDataEncodingStrategyTests.swift */, 5665FA3B2129EED8004D8612 /* DatabaseDateEncodingStrategyTests.swift */, 56703299212B5461007D270F /* DatabaseUUIDEncodingStrategyTests.swift */, 566A84422041AB2D00E50BFD /* MutablePersistableRecordChangesTests.swift */, @@ -940,12 +1032,14 @@ 56176C581EACC2D8000F3F2B /* GRDBTests */ = { isa = PBXGroup; children = ( + 567E4207242AB3CB00CAAD2C /* FailureTestCase.swift */, + 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */, 562EA81E1F17B26F00FA528C /* Compilation */, 56A238111B9C74A90082EB20 /* Core */, - 567E4207242AB3CB00CAAD2C /* FailureTestCase.swift */, + 567B5BFC2AD3285C00629622 /* Dump */, 5698AC3E1DA2BEBB0056AF8C /* FTS */, 56176CA01EACEE2A000F3F2B /* GRDBCipher */, - 5623E0901B4AFACC00B20B7F /* GRDBTestCase.swift */, + 5603CECB2AC8632A00CF097D /* JSON */, 56A238231B9C74A90082EB20 /* Migrations */, 569978D31B539038005EBEED /* Private */, 56300B5C1C53C38F005A543B /* QueryInterface */, @@ -1020,6 +1114,7 @@ 56012B682573FAE600B4925B /* CommonTableExpressionTests.swift */, 56EA63C7209C7F1E009715B8 /* DerivableRequestTests.swift */, 56300B601C53C42C005A543B /* FetchableRecord+QueryInterfaceRequestTests.swift */, + 568C3F7B2A5AB2D500A2309D /* ForeignKeyDefinitionTests.swift */, 56300B671C53D25E005A543B /* QueryInterfaceExpressionsTests.swift */, 5698AC021D9B9FCF0056AF8C /* QueryInterfaceExtensibilityTests.swift */, 563EF45521631E3E007DAACD /* QueryInterfacePromiseTests.swift */, @@ -1042,8 +1137,10 @@ 5687359E1CEDE16C009B9116 /* Betty.jpeg */, 5672DE581CDB72520022BA81 /* DatabaseQueueBackupTests.swift */, 563363BC1C93FD5E000BE133 /* 
DatabaseQueueConcurrencyTests.swift */, + 5623B6162AED39C200436239 /* DatabaseQueueInMemoryCopyTests.swift */, 56A238141B9C74A90082EB20 /* DatabaseQueueInMemoryTests.swift */, 567156151CB142AA007DC145 /* DatabaseQueueReadOnlyTests.swift */, + 5623B6182AED39C200436239 /* DatabaseQueueTemporaryCopyTests.swift */, 569178451CED9B6000E179EA /* DatabaseQueueTests.swift */, ); name = DatabaseQueue; @@ -1052,6 +1149,7 @@ 563B06DB2185E04E00B38F35 /* ValueObservation */ = { isa = PBXGroup; children = ( + 562B58CC2A29BC0700E8C75D /* Issue1383.sqlite */, 560233CC2724339A00529DF3 /* SharedValueObservationTests.swift */, 563B06FE21861D9D00B38F35 /* ValueObservationCountTests.swift */, 563B071A21862F5600B38F35 /* ValueObservationDatabaseValueConvertibleTests.swift */, @@ -1167,8 +1265,12 @@ 5656A8292295BD56001FF3FF /* SQLGeneration */ = { isa = PBXGroup; children = ( + 56F89E162A585E0D002FE2AA /* SQLColumnGenerator.swift */, 5656A82B2295BD56001FF3FF /* SQLGenerationContext.swift */, + 563CBBE22A595141008905CE /* SQLIndexGenerator.swift */, 5656A82A2295BD56001FF3FF /* SQLQueryGenerator.swift */, + 56F89E182A585E0D002FE2AA /* SQLTableAlterationGenerator.swift */, + 56F89E192A585E0D002FE2AA /* SQLTableGenerator.swift */, ); path = SQLGeneration; sourceTree = ""; @@ -1176,6 +1278,11 @@ 5656A82C2295BD56001FF3FF /* Schema */ = { isa = PBXGroup; children = ( + 56F89DF82A57EAB9002FE2AA /* ColumnDefinition.swift */, + 56F89E042A57EBA7002FE2AA /* Database+SchemaDefinition.swift */, + 56F89E052A57EBA7002FE2AA /* ForeignKeyDefinition.swift */, + 56F89E062A57EBA7002FE2AA /* IndexDefinition.swift */, + 56F89E032A57EBA7002FE2AA /* TableAlteration.swift */, 5656A82D2295BD56001FF3FF /* TableDefinition.swift */, 5656A82E2295BD56001FF3FF /* VirtualTableModule.swift */, ); @@ -1258,6 +1365,7 @@ 5674A7251F30A8EF0095F066 /* FetchableRecord */ = { isa = PBXGroup; children = ( + 561F38F52AC9CE5A0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */, 5665FA1C2129D807004D8612 /* DatabaseDateDecodingStrategyTests.swift */, 5674A7261F30A9090095F066 /* FetchableRecordDecodableTests.swift */, 565B0FEE1BBC7D980098DE03 /* FetchableRecordTests.swift */, @@ -1265,6 +1373,38 @@ name = FetchableRecord; sourceTree = ""; }; + 567B5BED2AD3285100629622 /* Dump */ = { + isa = PBXGroup; + children = ( + 567B5BF42AD3285100629622 /* Database+Dump.swift */, + 567B5BEE2AD3285100629622 /* DatabaseReader+dump.swift */, + 567B5BEF2AD3285100629622 /* DumpFormat.swift */, + 567B5BF02AD3285100629622 /* DumpFormats */, + ); + path = Dump; + sourceTree = ""; + }; + 567B5BF02AD3285100629622 /* DumpFormats */ = { + isa = PBXGroup; + children = ( + 567B5BF12AD3285100629622 /* QuoteDumpFormat.swift */, + 567B5BF22AD3285100629622 /* JSONDumpFormat.swift */, + 5642A3192AD66E0C0065F717 /* LineDumpFormat.swift */, + 5685C1942AD52EF200DA4B7A /* ListDumpFormat.swift */, + 567B5BF32AD3285100629622 /* DebugDumpFormat.swift */, + ); + path = DumpFormats; + sourceTree = ""; + }; + 567B5BFC2AD3285C00629622 /* Dump */ = { + isa = PBXGroup; + children = ( + 567B5BFD2AD3287800629622 /* DatabaseDumpTests.swift */, + 567B5BFE2AD3287800629622 /* DatabaseReaderDumpTests.swift */, + ); + name = Dump; + sourceTree = ""; + }; 5698AC291D9E5A480056AF8C /* FTS */ = { isa = PBXGroup; children = ( @@ -1364,6 +1504,7 @@ 568068301EBBA26100EFB8AA /* SQLRequestTests.swift */, 56A238201B9C74A90082EB20 /* Statement */, 56E8CE0F1BB4FE5B00828BEC /* StatementColumnConvertibleFetchTests.swift */, + 56AFEF382996B9EE00CA1E51 /* TransactionDateTests.swift */, 5607EFD11BB8253300605DE3 /* 
TransactionObserver */, ); name = Core; @@ -1419,6 +1560,7 @@ children = ( 564E73E7203DA278000C443C /* JoinSupportTests.swift */, 5616B4FE28B5F5490052017E /* SingletonRecordTest.swift */, + 56B6AB072BD3DCE0009A0B71 /* SingletonUserDefaultsTest.swift */, 5674A7251F30A8EF0095F066 /* FetchableRecord */, 560B3FA41C19DFF800C58EC7 /* PersistableRecord */, 56176C9E1EACEDF9000F3F2B /* Record */, @@ -1457,12 +1599,14 @@ 56231E6025CEBF06001DFD2F /* RowDecodingError.swift */, 56BB6EA81D3009B100A1CA52 /* SchedulingWatchdog.swift */, 560A37A61C8FF6E500949E71 /* SerializedDatabase.swift */, + 56D3332129C38D7A00430680 /* WALSnapshotTransaction.swift */, 56A6EB2226076F6A00C27594 /* SQL.swift */, 56E9FAC32210468500C703A8 /* SQLInterpolation.swift */, 56FBFED52210731000945324 /* SQLRequest.swift */, 56A238781B9C75030082EB20 /* Statement.swift */, 566B912A1FA4D0CC0012D5B0 /* StatementAuthorizer.swift */, 560D923F1C672C3E00F4F92B /* StatementColumnConvertible.swift */, + 56AFEF3029969F7E00CA1E51 /* TransactionClock.swift */, 566B91321FA4D3810012D5B0 /* TransactionObserver.swift */, 56564F3828637C9900A19E9F /* WALSnapshot.swift */, 5605F1471C672E4000235C62 /* Support */, @@ -1575,7 +1719,6 @@ DC37742D19C8CC90004FCF85 /* GRDB */, F3BA7FED1CFB21F8003DC1BA /* GRDBCustomSQLite */, DC10500F19C904DD00D8CA30 /* Tests */, - 5643676E272EDF3700C718C7 /* Scripts */, DC3773F419C8CBB3004FCF85 /* Products */, ); indentWidth = 4; @@ -1605,8 +1748,11 @@ children = ( 56A2FA3724424F4200E97D23 /* Export.swift */, 566DDE11288D76400000DCFB /* Fixits.swift */, + 648704B82B8261070036480B /* PrivacyInfo.xcprivacy */, 56A2386F1B9C75030082EB20 /* Core */, + 567B5BED2AD3285100629622 /* Dump */, 5698AC291D9E5A480056AF8C /* FTS */, + 5603CEBE2AC862F800CF097D /* JSON */, 56A238911B9C750B0082EB20 /* Migration */, 5656A8252295BD56001FF3FF /* QueryInterface */, 56A2389F1B9C753B0082EB20 /* Record */, @@ -1779,6 +1925,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 648704BA2B8261070036480B /* PrivacyInfo.xcprivacy in Resources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -1786,6 +1933,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 562B58CE2A29BC0700E8C75D /* Issue1383.sqlite in Resources */, 569BBA43229066CB00478429 /* InflectionsTests.json in Resources */, F3BA80CD1CFB2FDD003DC1BA /* Betty.jpeg in Resources */, ); @@ -1839,6 +1987,7 @@ files = ( 5656A8712295BD56001FF3FF /* HasOneThroughAssociation.swift in Sources */, 56713FE32691F487006153C3 /* JSONRequiredEncoder.swift in Sources */, + 5685C1962AD52EF200DA4B7A /* ListDumpFormat.swift in Sources */, 566BD7332927AFD600595649 /* ValueConcurrentObserver.swift in Sources */, 56158786288D878500A67323 /* Optional.swift in Sources */, 5636E9BE1D22574100B9B05F /* FetchRequest.swift in Sources */, @@ -1846,6 +1995,8 @@ 563B8FBD24A1D388007A48C9 /* OnDemandFuture.swift in Sources */, 5656A85D2295BD56001FF3FF /* VirtualTableModule.swift in Sources */, 5656A8872295BD56001FF3FF /* SQLOperators.swift in Sources */, + 56F89E092A57EBA7002FE2AA /* ForeignKeyDefinition.swift in Sources */, + 567B5BFB2AD3285100629622 /* Database+Dump.swift in Sources */, 566DDE12288D76400000DCFB /* Fixits.swift in Sources */, 563EF44D2161F196007DAACD /* Inflections.swift in Sources */, 566B910B1FA4C3970012D5B0 /* Database+Statements.swift in Sources */, @@ -1860,6 +2011,7 @@ 56CEB4F31EAA2EFA00BFAF62 /* FetchableRecord.swift in Sources */, 5656A8512295BD56001FF3FF /* SQLInterpolation+QueryInterface.swift in Sources */, 56BB6EAB1D3009B100A1CA52 /* 
SchedulingWatchdog.swift in Sources */, + 56F89E1A2A585E0D002FE2AA /* SQLColumnGenerator.swift in Sources */, 56894F332604FC1E00268F4D /* Table.swift in Sources */, F3BA80761CFB2E55003DC1BA /* StatementColumnConvertible.swift in Sources */, 566B91351FA4D3810012D5B0 /* TransactionObserver.swift in Sources */, @@ -1911,6 +2063,7 @@ 563B8FB224A1CE9E007A48C9 /* DatabasePublishers.swift in Sources */, 56C0539D22ACEECD0029D27D /* RemoveDuplicates.swift in Sources */, F3BA807C1CFB2E61003DC1BA /* Date.swift in Sources */, + 56F89E1B2A585E0D002FE2AA /* SQLTableAlterationGenerator.swift in Sources */, 56A8C2321D1914540096E9D4 /* UUID.swift in Sources */, 5656A8812295BD56001FF3FF /* SQLCollection.swift in Sources */, 56D51D021EA789FA0074638A /* FetchableRecord+TableRecord.swift in Sources */, @@ -1923,6 +2076,7 @@ 5656A8852295BD56001FF3FF /* SQLOrdering.swift in Sources */, F3BA808D1CFB2E75003DC1BA /* Migration.swift in Sources */, F3BA80791CFB2E61003DC1BA /* DatabaseDateComponents.swift in Sources */, + 56AFEF3229969F7E00CA1E51 /* TransactionClock.swift in Sources */, 5674A6EC1F307F0E0095F066 /* DatabaseValueConvertible+Encodable.swift in Sources */, 568ECB0D25D904CA00B71526 /* SQLSelection.swift in Sources */, 5657AB111D10899D006283EF /* URL.swift in Sources */, @@ -1937,6 +2091,9 @@ 5656A8832295BD56001FF3FF /* SQLExpression.swift in Sources */, 5659F48A1EA8D94E004A4992 /* Utils.swift in Sources */, F3BA807F1CFB2E61003DC1BA /* NSString.swift in Sources */, + 56F89DFA2A57EAB9002FE2AA /* ColumnDefinition.swift in Sources */, + 56F89E1C2A585E0D002FE2AA /* SQLTableGenerator.swift in Sources */, + 5603CEC52AC862F800CF097D /* SQLJSONExpressible.swift in Sources */, 560233D127243A9200529DF3 /* SharedValueObservation.swift in Sources */, 5656A8592295BD56001FF3FF /* SQLGenerationContext.swift in Sources */, 5690C3421D23E82A00E59934 /* Data.swift in Sources */, @@ -1950,7 +2107,9 @@ 5656A87F2295BD56001FF3FF /* SQLForeignKeyRequest.swift in Sources */, 56B964BB1DA51D0A0002DA19 /* FTS5Pattern.swift in Sources */, F3BA807D1CFB2E61003DC1BA /* NSNull.swift in Sources */, + 56F89E072A57EBA7002FE2AA /* TableAlteration.swift in Sources */, 56FBFED62210731100945324 /* SQLRequest.swift in Sources */, + 567B5BF72AD3285100629622 /* DumpFormat.swift in Sources */, 5674A6FC1F307F600095F066 /* FetchableRecord+Decodable.swift in Sources */, F3BA80701CFB2E55003DC1BA /* DatabaseValueConvertible.swift in Sources */, 56D110EB28AFC90800E64463 /* MutablePersistableRecord+Insert.swift in Sources */, @@ -1964,16 +2123,27 @@ 5698AD231DABAEFA0056AF8C /* FTS5WrapperTokenizer.swift in Sources */, 5656A85B2295BD56001FF3FF /* TableDefinition.swift in Sources */, 5656A8552295BD56001FF3FF /* FTS5+QueryInterface.swift in Sources */, + 567B5BF92AD3285100629622 /* JSONDumpFormat.swift in Sources */, + 5603CEC62AC862F800CF097D /* JSONColumn.swift in Sources */, 5656A88F2295BD56001FF3FF /* Column.swift in Sources */, 564CE5B721B8FBEB00652B19 /* DatabaseRegionObservation.swift in Sources */, + 5642A31B2AD66E0C0065F717 /* LineDumpFormat.swift in Sources */, 5656A8612295BD56001FF3FF /* TableRecord+Association.swift in Sources */, + 567B5BFA2AD3285100629622 /* DebugDumpFormat.swift in Sources */, + 5603CEC42AC862F800CF097D /* SQLJSONFunctions.swift in Sources */, F3BA808C1CFB2E75003DC1BA /* DatabaseMigrator.swift in Sources */, + 563CBBE42A595141008905CE /* SQLIndexGenerator.swift in Sources */, 5613ED6121A95E6100DC7A68 /* ValueObservation.swift in Sources */, F3BA80921CFB2E7A003DC1BA /* TableRecord.swift in Sources */, + 56F89E0A2A57EBA7002FE2AA 
/* IndexDefinition.swift in Sources */, 567071F4208A00BE006AD95A /* SQLiteDateParser.swift in Sources */, 56D110F328AFC90800E64463 /* MutablePersistableRecord.swift in Sources */, + 567B5BF82AD3285100629622 /* QuoteDumpFormat.swift in Sources */, + 56F89E082A57EBA7002FE2AA /* Database+SchemaDefinition.swift in Sources */, F3BA80831CFB2E67003DC1BA /* DatabaseValueConvertible+RawRepresentable.swift in Sources */, 5657AABB1D107001006283EF /* NSData.swift in Sources */, + 56D3332329C38D7B00430680 /* WALSnapshotTransaction.swift in Sources */, + 567B5BF62AD3285100629622 /* DatabaseReader+dump.swift in Sources */, 5674A6E51F307F0E0095F066 /* DatabaseValueConvertible+Decodable.swift in Sources */, 5656A8652295BD56001FF3FF /* BelongsToAssociation.swift in Sources */, F3BA806A1CFB2E55003DC1BA /* DatabaseQueue.swift in Sources */, @@ -1990,6 +2160,7 @@ F3BA80CC1CFB2FD8003DC1BA /* DatabaseQueueTests.swift in Sources */, F3BA81101CFB3057003DC1BA /* Row+FoundationTests.swift in Sources */, F3BA812C1CFB3064003DC1BA /* RecordMinimalPrimaryKeyRowIDTests.swift in Sources */, + 561F38DB2AC8914D0051EEE9 /* JSONColumnTests.swift in Sources */, 5698AC991DA4B0430056AF8C /* FTS4RecordTests.swift in Sources */, 562EA8321F17B9EB00FA528C /* CompilationSubClassTests.swift in Sources */, 5653EB6C20961FB200F46237 /* AssociationParallelDecodableRecordTests.swift in Sources */, @@ -2005,6 +2176,7 @@ 56FF45591D2CDA5200F21EF9 /* RecordUniqueIndexTests.swift in Sources */, 56B021CC1D8C0D3900B239BB /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */, 561667041D08A49900ADD404 /* FoundationNSDecimalNumberTests.swift in Sources */, + 567B5C562AD3301800629622 /* DatabaseDumpTests.swift in Sources */, 56419C8F24A51D7D004967E1 /* Map.swift in Sources */, F3BA80E31CFB300F003DC1BA /* DatabaseValueConvertibleSubclassTests.swift in Sources */, 566AD8C91D531BEB002EC1A8 /* TableDefinitionTests.swift in Sources */, @@ -2031,6 +2203,7 @@ F3BA81301CFB3064003DC1BA /* RecordPrimaryKeyRowIDTests.swift in Sources */, 56DAA2D51DE99DAB006E10C8 /* DatabaseCursorTests.swift in Sources */, 5653EB7A20961FB200F46237 /* AssociationParallelRowScopesTests.swift in Sources */, + 561F38F22AC9CE220051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, 563C67B824628C0C00E94EDC /* DatabasePoolTests.swift in Sources */, F3BA80FB1CFB3021003DC1BA /* StatementArgumentsTests.swift in Sources */, F3BA80EE1CFB3017003DC1BA /* RowAdapterTests.swift in Sources */, @@ -2090,6 +2263,7 @@ 564E73F3203DA2AC000C443C /* JoinSupportTests.swift in Sources */, 563B071B21862F5600B38F35 /* ValueObservationDatabaseValueConvertibleTests.swift in Sources */, 5674A70C1F3087710095F066 /* DatabaseValueConvertibleEncodableTests.swift in Sources */, + 5623B61A2AED39C300436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 5698AC8C1DA389380056AF8C /* FTS3TableBuilderTests.swift in Sources */, 5665F868203EF4640084C6C0 /* ColumnInfoTests.swift in Sources */, F3BA81391CFB3064003DC1BA /* RecordWithColumnNameManglingTests.swift in Sources */, @@ -2136,6 +2310,7 @@ 5665FA3D2129EED8004D8612 /* DatabaseDateEncodingStrategyTests.swift in Sources */, 564D4F9A261E1E0200F55856 /* CaseInsensitiveIdentifierTests.swift in Sources */, 56677C27241E6EA20050755D /* ValueObservationRecorder.swift in Sources */, + 567B5C572AD3301800629622 /* DatabaseReaderDumpTests.swift in Sources */, 563B8F9E249E8AB0007A48C9 /* ValueObservationPrintTests.swift in Sources */, 56419C8B24A51D7D004967E1 /* NextOne.swift in Sources */, 5644DE7F20F8D1D1001FFDDE /* 
DatabaseValueConversionErrorTests.swift in Sources */, @@ -2158,10 +2333,13 @@ 5698AC431DA2BED90056AF8C /* FTS3PatternTests.swift in Sources */, 563B533B267E2FA4009549B5 /* TableTests.swift in Sources */, 5653EB6E20961FB200F46237 /* AssociationBelongsToSQLDerivationTests.swift in Sources */, + 56B6AB092BD3DCE0009A0B71 /* SingletonUserDefaultsTest.swift in Sources */, + 561F38F62AC9CE5A0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */, 564CE5C621B8FFE600652B19 /* DatabaseRegionObservationTests.swift in Sources */, F3BA80E11CFB300F003DC1BA /* DatabaseValueConversionTests.swift in Sources */, 5623931B1DECC02000A6B01F /* RowFetchTests.swift in Sources */, 564F9C211F069B4E00877A00 /* DatabaseAggregateTests.swift in Sources */, + 5623B6192AED39C300436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */, F3BA80ED1CFB3017003DC1BA /* RowFromDictionaryTests.swift in Sources */, 5690C3291D23E6D800E59934 /* FoundationDateComponentsTests.swift in Sources */, 5657AB391D108BA9006283EF /* FoundationDataTests.swift in Sources */, @@ -2190,6 +2368,7 @@ 568068341EBBA26100EFB8AA /* SQLRequestTests.swift in Sources */, 5657AB691D108BA9006283EF /* FoundationURLTests.swift in Sources */, 56741EAB1E66A8B3003E422D /* FetchRequestTests.swift in Sources */, + 56AFEF3A2996B9EE00CA1E51 /* TransactionDateTests.swift in Sources */, 56B964D61DA521450002DA19 /* FTS5TableBuilderTests.swift in Sources */, 563F4CB4242F7F140052E96C /* ValueObservationTests.swift in Sources */, 5674A71D1F30A8DF0095F066 /* MutablePersistableRecordEncodableTests.swift in Sources */, @@ -2198,8 +2377,10 @@ 562393511DEDFEFB00A6B01F /* EnumeratedCursorTests.swift in Sources */, 56AE6428222AACE300AD1B0B /* AssociationHasOneThroughSQLTests.swift in Sources */, F3BA812D1CFB3064003DC1BA /* RecordMinimalPrimaryKeySingleTests.swift in Sources */, + 567B5C042AD328D900629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 5698AC831DA380A20056AF8C /* VirtualTableModuleTests.swift in Sources */, F3BA811B1CFB305F003DC1BA /* FetchableRecord+QueryInterfaceRequestTests.swift in Sources */, + 5603CED52AC8643800CF097D /* JSONExpressionsTests.swift in Sources */, F3BA811A1CFB305F003DC1BA /* Record+QueryInterfaceRequestTests.swift in Sources */, F3BA81111CFB3057003DC1BA /* StatementArguments+FoundationTests.swift in Sources */, F3BA81371CFB3064003DC1BA /* RecordInitializersTests.swift in Sources */, @@ -2210,6 +2391,7 @@ 56419C8124A51D6E004967E1 /* DatabaseRegionObservationPublisherTests.swift in Sources */, 561CFAA42376EF59000C8BAA /* AssociationHasManyThroughOrderingTests.swift in Sources */, 56ED8A7F1DAB8D6800BD0ABC /* FTS5WrapperTokenizerTests.swift in Sources */, + 568C3F7D2A5AB2D500A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 5698AD011DAA8ACB0056AF8C /* FTS5CustomTokenizerTests.swift in Sources */, F3BA80B51CFB2FCA003DC1BA /* DatabaseQueueInMemoryTests.swift in Sources */, 56A4CDB31D4234B200B1A9B9 /* SQLExpressionLiteralTests.swift in Sources */, diff --git a/GRDB~dark.png b/GRDB~dark.png new file mode 100644 index 0000000000..c36937c708 Binary files /dev/null and b/GRDB~dark.png differ diff --git a/LICENSE b/LICENSE index 8318870b11..922a3e13a4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2015-2020 Gwendal Roué +Copyright (C) 2015-2024 Gwendal Roué Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: diff --git a/Makefile b/Makefile index 0dc02d847d..819e8dcae7 100644 --- a/Makefile +++ b/Makefile @@ -52,11 +52,17 @@ MAX_IOS_DESTINATION := $(shell xcrun simctl list -j devices available | Scripts/ MIN_TVOS_DESTINATION := $(shell xcrun simctl list -j devices available | Scripts/destination.rb | grep tvOS | sort -n | head -1 | cut -wf 3 | sed 's/\(.*\)/"platform=tvOS Simulator,id=\1"/') MAX_TVOS_DESTINATION := $(shell xcrun simctl list -j devices available | Scripts/destination.rb | grep tvOS | sort -rn | head -1 | cut -wf 3 | sed 's/\(.*\)/"platform=tvOS Simulator,id=\1"/') -# If xcpretty is available, use it for xcodebuild output -XCPRETTY = -XCPRETTY_PATH := $(shell command -v xcpretty 2> /dev/null) -ifdef XCPRETTY_PATH - XCPRETTY = | xcpretty -c + # If xcbeautify or xcpretty is available, use it for xcodebuild output, except in CI. +XCPRETTY = +ifeq ($(CI),true) +else + XCBEAUTIFY_PATH := $(shell command -v xcbeautify 2> /dev/null) + XCPRETTY_PATH := $(shell command -v xcpretty 2> /dev/null) + ifdef XCBEAUTIFY_PATH + XCPRETTY = | xcbeautify + else ifdef XCPRETTY_PATH + XCPRETTY = | xcpretty -c + endif endif # ===== @@ -69,8 +75,8 @@ test_framework_darwin: test_framework_GRDB test_framework_GRDBCustom test_framew test_framework_GRDB: test_framework_GRDBOSX test_framework_GRDBiOS test_framework_GRDBtvOS test_framework_GRDBCustom: test_framework_GRDBCustomSQLiteOSX test_framework_GRDBCustomSQLiteiOS test_framework_SQLCipher: test_framework_SQLCipher3 test_framework_SQLCipher3Encrypted test_framework_SQLCipher4 test_framework_SQLCipher4Encrypted -test_archive: test_archive_GRDBOSX_xcframework -test_install: test_install_manual test_install_SPM test_install_customSQLite test_install_GRDB_CocoaPods test_CocoaPodsLint +test_archive: test_universal_xcframework +test_install: test_install_manual test_install_SPM test_install_customSQLite test_install_GRDB_CocoaPods test_CocoaPodsLint: test_CocoaPodsLint_GRDB test_demo_apps: test_GRDBDemoiOS test_GRDBCombineDemo test_GRDBAsyncDemo @@ -128,6 +134,7 @@ test_framework_GRDBCustomSQLiteOSX: SQLiteCustom $(XCODEBUILD) \ -project GRDBCustom.xcodeproj \ -scheme GRDBCustom \ + -destination "platform=macOS" \ $(TEST_ACTIONS) \ $(XCPRETTY) @@ -212,22 +219,51 @@ test_SPM: $(SWIFT) build -c release set -o pipefail && $(SWIFT) test --parallel -test_archive_GRDBOSX_xcframework: +test_universal_xcframework: rm -rf Tests/products mkdir Tests/products $(XCODEBUILD) archive \ -project GRDB.xcodeproj \ -scheme GRDB \ - -configuration Release \ + -destination "generic/platform=iOS" \ + OTHER_SWIFT_FLAGS=$(OTHER_SWIFT_FLAGS) \ + GCC_PREPROCESSOR_DEFINITIONS=$(GCC_PREPROCESSOR_DEFINITIONS) \ + -archivePath "$(PWD)/Tests/products/GRDB-iOS.xcarchive" \ + SKIP_INSTALL=NO \ + BUILD_LIBRARY_FOR_DISTRIBUTION=YES + $(XCODEBUILD) archive \ + -project GRDB.xcodeproj \ + -scheme GRDB \ + -destination "generic/platform=iOS Simulator" \ + OTHER_SWIFT_FLAGS=$(OTHER_SWIFT_FLAGS) \ + GCC_PREPROCESSOR_DEFINITIONS=$(GCC_PREPROCESSOR_DEFINITIONS) \ + -archivePath "$(PWD)/Tests/products/GRDB-iOS_Simulator.xcarchive" \ + SKIP_INSTALL=NO \ + BUILD_LIBRARY_FOR_DISTRIBUTION=YES + $(XCODEBUILD) archive \ + -project GRDB.xcodeproj \ + -scheme GRDB \ -destination "generic/platform=macOS" \ OTHER_SWIFT_FLAGS=$(OTHER_SWIFT_FLAGS) \ GCC_PREPROCESSOR_DEFINITIONS=$(GCC_PREPROCESSOR_DEFINITIONS) \ - -archivePath 
"$(PWD)/Tests/products/GRDB.xcarchive" \ + -archivePath "$(PWD)/Tests/products/GRDB-macOS.xcarchive" \ + SKIP_INSTALL=NO \ + BUILD_LIBRARY_FOR_DISTRIBUTION=YES + $(XCODEBUILD) archive \ + -project GRDB.xcodeproj \ + -scheme GRDB \ + -destination "generic/platform=macOS,variant=Mac Catalyst" \ + OTHER_SWIFT_FLAGS=$(OTHER_SWIFT_FLAGS) \ + GCC_PREPROCESSOR_DEFINITIONS=$(GCC_PREPROCESSOR_DEFINITIONS) \ + -archivePath "$(PWD)/Tests/products/GRDB-Mac_Catalyst.xcarchive" \ SKIP_INSTALL=NO \ BUILD_LIBRARY_FOR_DISTRIBUTION=YES $(XCODEBUILD) -create-xcframework \ - -framework '$(PWD)/Tests/products/GRDB.xcarchive/Products/Library/Frameworks/GRDB.framework' \ - -output '$(PWD)/Tests/products/GRDB.xcframework' + -archive '$(PWD)/Tests/products/GRDB-iOS.xcarchive' -framework GRDB.framework \ + -archive '$(PWD)/Tests/products/GRDB-iOS_Simulator.xcarchive' -framework GRDB.framework \ + -archive '$(PWD)/Tests/products/GRDB-macOS.xcarchive' -framework GRDB.framework \ + -archive '$(PWD)/Tests/products/GRDB-Mac_Catalyst.xcarchive' -framework GRDB.framework \ + -output '$(PWD)/Tests/products/GRDB.xcframework' test_install_manual: $(XCODEBUILD) \ @@ -249,6 +285,7 @@ test_install_SPM_Project: $(XCODEBUILD) \ -project Tests/SPM/PlainProject/Plain.xcodeproj \ -scheme Plain \ + -destination "platform=macOS" \ -configuration Release \ clean build \ $(XCPRETTY) @@ -266,6 +303,7 @@ test_install_SPM_macos_release: $(XCODEBUILD) \ -project Tests/SPM/macos/macos.xcodeproj \ -scheme macos \ + -destination "platform=macOS" \ -configuration Release \ clean build \ $(XCPRETTY) @@ -283,6 +321,7 @@ test_install_customSQLite: SQLiteCustom $(XCODEBUILD) \ -project Tests/CustomSQLite/CustomSQLite.xcodeproj \ -scheme CustomSQLite \ + -destination "platform=macOS" \ -configuration Release \ clean build \ $(XCPRETTY) @@ -357,6 +396,7 @@ test_performance: $(XCODEBUILD) \ -project Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj \ -scheme GRDBOSXPerformanceComparisonTests \ + -destination "platform=macOS" \ build-for-testing test-without-building # Target that setups SQLite custom builds with extra compilation options. diff --git a/Package.swift b/Package.swift index de557ee0d5..40754a4bfe 100644 --- a/Package.swift +++ b/Package.swift @@ -4,13 +4,17 @@ import Foundation import PackageDescription -// Don't rely on those environment variables. They are ONLY testing conveniences: -// $ SQLITE_ENABLE_FTS5=1 SQLITE_ENABLE_PREUPDATE_HOOK=1 make test_SPM -var swiftSettings: [SwiftSetting] = [] +var swiftSettings: [SwiftSetting] = [ + .define("SQLITE_ENABLE_FTS5"), +] var cSettings: [CSetting] = [] -if ProcessInfo.processInfo.environment["SQLITE_ENABLE_FTS5"] == "1" { - swiftSettings.append(.define("SQLITE_ENABLE_FTS5")) -} +var dependencies: [PackageDescription.Package.Dependency] = [] + +// For Swift 5.8+ +//swiftSettings.append(.enableUpcomingFeature("ExistentialAny")) + +// Don't rely on those environment variables. They are ONLY testing conveniences: +// $ SQLITE_ENABLE_PREUPDATE_HOOK=1 make test_SPM if ProcessInfo.processInfo.environment["SQLITE_ENABLE_PREUPDATE_HOOK"] == "1" { swiftSettings.append(.define("SQLITE_ENABLE_PREUPDATE_HOOK")) cSettings.append(.define("GRDB_SQLITE_ENABLE_PREUPDATE_HOOK")) @@ -55,7 +59,6 @@ let sqlCipherCSettings = cSettings + [ // for more information. // // SPI_BUILDER also enables the `make docs-localhost` command. 
-var dependencies: [PackageDescription.Package.Dependency] = [] if ProcessInfo.processInfo.environment["SPI_BUILDER"] == "1" { dependencies.append(.package(url: "https://github.com/apple/swift-docc-plugin", from: "1.0.0")) } @@ -79,6 +82,7 @@ let package = Package( name: "GRDB", dependencies: ["SQLCipher"], path: "GRDB", + resources: [.copy("PrivacyInfo.xcprivacy")], cSettings: cSettings, swiftSettings: swiftSettings), .target( @@ -102,6 +106,7 @@ let package = Package( resources: [ .copy("GRDBTests/Betty.jpeg"), .copy("GRDBTests/InflectionsTests.json"), + .copy("GRDBTests/Issue1383.sqlite"), ], cSettings: cSettings, swiftSettings: swiftSettings) diff --git a/README.md b/README.md index 6d687feed0..fc78a32271 100644 --- a/README.md +++ b/README.md @@ -1,89 +1,143 @@ -![GRDB: A toolkit for SQLite databases, with a focus on application development](https://raw.githubusercontent.com/groue/GRDB.swift/master/GRDB.png) +# GRDB + SQLCipher -

A toolkit for SQLite databases, with a focus on application development

+## What is this? +This is a fork of [GRDB](https://github.com/groue/GRDB.swift) which contains a [SQLCipher Community Edition](https://www.zetetic.net/sqlcipher/open-source/) amalgamation packaged so that it can be consumed as a Swift Package. + +The default branch for this repository is `SQLCipher` so that we can more easily pull upstream changes if we need to. + +## Versioning + +* This Package: *Session-6.27.0* +* GRDB: *6.27.0* +* SQLCipher: *4.6.0* + +## Contributions +We do not accept contributions to this repository at this time. However, feel free to open an issue in order to start a discussion. + +## Updating from Upstream + +Add remote upstream: + +* `git remote add upstream git@github.com:groue/GRDB.swift.git` + +Check out upstream's master branch locally: + +* `git fetch upstream +master:upstream-master && git checkout upstream-master` + +Update upstream's master branch if needed: + +* `git pull upstream master` + +Switch back to the `SQLCipher` branch and merge with upstream-master: + +* `git merge upstream-master` + +Resolve any conflicts that may occur (normally there should be none or only in Package.swift) +and commit the merge. Once done, run `prepare_release.sh` script to fetch and compile the latest tag +of SQLCipher and embed it in GRDB.swift: + +* `./prepare_release.sh` + +The script will also: +* present the summary of updated versions and ask you to pick the new version number for DuckDuckGo GRDB fork, +* test the build, +* create a new release branch and commit changes. + +For versioning, follow [Semantic Versioning Rules](https://semver.org), but note you don't need +to use the same version as GRDB. Examples: + +* Upstream GRDB 5.6.0, after merge -> 5.12.0 + * This project 1.0.0 -> 1.1.0 + +* Upstream GRDB 5.12.0, after merge -> 6.0.0 + * This project 1.1.0 -> 2.0.0 + +If everything looks fine: +* push your branch, +* create PR for BSK referencing the new branch, +* create PRs for iOS and macOS apps referencing your BSK branch. + +Once approved: +* merge your branch back to `SQLCipher`, +* create a tag matching the release number **without the 'v' prefix** (those are reserved for upstream), +* push the tag, +* update the reference to GRDB in BSK to point to a tag. + +### Compiling SQLCipher manually + +In case `prepare_release.sh` script fails, you need to compile SQLCipher amalgamation package +manually. See [general instructions](https://github.com/sqlcipher/sqlcipher#compiling-for-unix-like-systems): + +* Use `./configure --with-crypto-lib=none`. +* Remember to use `make sqlite3.c` and not `make`. +* Copy `sqlite3.c` and `sqlite3.h` to `Sources/SQLCipher/sqlite3.c` and `Sources/SQLCipher/include/sqlite3.h`. + + +-- + + + + + GRDB: A toolkit for SQLite databases, with a focus on application development. + + +

+ A toolkit for SQLite databases, with a focus on application development
+ Proudly serving the community since 2015 +

Swift 5.7 - Platforms License CI Status

---- - -**Latest release**: December 29, 2022 • [version 6.6.0](https://github.com/groue/GRDB.swift/tree/v6.6.0) • [CHANGELOG](CHANGELOG.md) • [Migrating From GRDB 5 to GRDB 6](Documentation/GRDB6MigrationGuide.md) +**Latest release**: April 21, 2024 • [version 6.27.0](https://github.com/groue/GRDB.swift/tree/v6.27.0) • [CHANGELOG](CHANGELOG.md) • [Migrating From GRDB 5 to GRDB 6](Documentation/GRDB6MigrationGuide.md) **Requirements**: iOS 11.0+ / macOS 10.13+ / tvOS 11.0+ / watchOS 4.0+ • SQLite 3.19.3+ • Swift 5.7+ / Xcode 14+ -| Swift version | GRDB version | -| -------------- | ----------------------------------------------------------- | -| **Swift 5.7+** | **v6.6.0** | -| Swift 5.3 | [v5.26.1](https://github.com/groue/GRDB.swift/tree/v5.26.1) | -| Swift 5.2 | [v5.12.0](https://github.com/groue/GRDB.swift/tree/v5.12.0) | -| Swift 5.1 | [v4.14.0](https://github.com/groue/GRDB.swift/tree/v4.14.0) | -| Swift 5 | [v4.14.0](https://github.com/groue/GRDB.swift/tree/v4.14.0) | -| Swift 4.2 | [v4.14.0](https://github.com/groue/GRDB.swift/tree/v4.14.0) | -| Swift 4.1 | [v3.7.0](https://github.com/groue/GRDB.swift/tree/v3.7.0) | -| Swift 4 | [v2.10.0](https://github.com/groue/GRDB.swift/tree/v2.10.0) | -| Swift 3.2 | [v1.3.0](https://github.com/groue/GRDB.swift/tree/v1.3.0) | -| Swift 3.1 | [v1.3.0](https://github.com/groue/GRDB.swift/tree/v1.3.0) | -| Swift 3 | [v1.0](https://github.com/groue/GRDB.swift/tree/v1.0) | -| Swift 2.3 | [v0.81.2](https://github.com/groue/GRDB.swift/tree/v0.81.2) | -| Swift 2.2 | [v0.80.2](https://github.com/groue/GRDB.swift/tree/v0.80.2) | - **Contact**: -- Release announcements and usage tips: follow [@groue](http://twitter.com/groue) on Twitter. +- Release announcements and usage tips: follow [@groue](http://twitter.com/groue) on Twitter, [@groue@hachyderm.io](https://hachyderm.io/@groue) on Mastodon. - Report bugs in a [Github issue](https://github.com/groue/GRDB.swift/issues/new). Make sure you check the [existing issues](https://github.com/groue/GRDB.swift/issues?q=is%3Aopen) first. -- A question? Looking for advice? Do you wonder how to contribute? Fancy a chat? Go to the [GRDB forums](https://forums.swift.org/c/related-projects/grdb), or open a [Github issue](https://github.com/groue/GRDB.swift/issues/new). +- A question? Looking for advice? Do you wonder how to contribute? Fancy a chat? Go to the [GitHub discussions](https://github.com/groue/GRDB.swift/discussions), or the [GRDB forums](https://forums.swift.org/c/related-projects/grdb). -## What is this? +## What is GRDB? -GRDB provides raw access to SQL and advanced SQLite features, because one sometimes enjoys a sharp tool. It has robust concurrency primitives, so that multi-threaded applications can efficiently use their databases. It grants your application models with persistence and fetching methods, so that you don't have to deal with SQL and raw database rows when you don't want to. +Use this library to save your application’s permanent data into SQLite databases. It comes with built-in tools that address common needs: -Compared to [SQLite.swift](https://github.com/stephencelis/SQLite.swift) or [FMDB](https://github.com/ccgus/fmdb), GRDB can spare you a lot of glue code. Compared to [Core Data](https://developer.apple.com/library/content/documentation/Cocoa/Conceptual/CoreData/) or [Realm](http://realm.io), it can simplify your multi-threaded applications. 
+- **SQL Generation** + + Enhance your application models with persistence and fetching methods, so that you don't have to deal with SQL and raw database rows when you don't want to. -It comes with [up-to-date documentation](#documentation), [general guides](#general-guides--good-practices), and it is [fast](https://github.com/groue/GRDB.swift/wiki/Performance). +- **Database Observation** + + Get notifications when database values are modified. -See [Why Adopt GRDB?](Documentation/WhyAdoptGRDB.md) if you are looking for your favorite database library. +- **Robust Concurrency** + + Multi-threaded applications can efficiently use their databases, including WAL databases that support concurrent reads and writes. + +- **Migrations** + + Evolve the schema of your database as you ship new versions of your application. + +- **Leverage your SQLite skills** + Not all developers need advanced SQLite features. But when you do, GRDB is as sharp as you want it to be. Come with your SQL and SQLite skills, or learn new ones as you go! ---

- FeaturesUsage • - InstallationDocumentation • + InstallationFAQ

--- - -## Features - -Programming tools for both database beginners and SQLite experts: - -- [Access to raw SQL and SQLite](#sqlite-api) -- [Records](#records): Fetching and persistence methods for your custom structs and class hierarchies. -- [Query Interface](#the-query-interface): A swift way to avoid the SQL language. -- [Associations]: Relations and joins between record types. -- [WAL Mode Support](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool): Extra performance for multi-threaded applications. -- [Migrations]: Transform your database as your application evolves. -- [Database Observation]: Observe database changes and transactions. -- [Full-Text Search] -- [Encryption](#encryption) -- [Support for Custom SQLite Builds](Documentation/CustomSQLiteBuilds.md) - -In-depth integration with our programming environment: - -- [Swift Concurrency](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency): `try await` your database. -- [SwiftUI](https://github.com/groue/GRDBQuery): Access and observe the database from your SwiftUI views. -- [Combine](Documentation/Combine.md): Access and observe the database with Combine publishers. -- [RxSwift](https://github.com/RxSwiftCommunity/RxGRDB): Access and observe the database with RxSwift observables. - ## Usage
@@ -98,7 +152,7 @@ let dbQueue = try DatabaseQueue(path: "/path/to/database.sqlite") // 2. Define the database schema try dbQueue.write { db in try db.create(table: "player") { t in - t.autoIncrementedPrimaryKey("id") + t.primaryKey("id", .text) t.column("name", .text).notNull() t.column("score", .integer).notNull() } @@ -106,15 +160,15 @@ try dbQueue.write { db in // 3. Define a record type struct Player: Codable, FetchableRecord, PersistableRecord { - var id: Int64 + var id: String var name: String var score: Int } -// 4. Access the database +// 4. Write and read in the database try dbQueue.write { db in - try Player(id: 1, name: "Arthur", score: 100).insert(db) - try Player(id: 2, name: "Barbara", score: 1000).insert(db) + try Player(id: "1", name: "Arthur", score: 100).insert(db) + try Player(id: "2", name: "Barbara", score: 1000).insert(db) } let players: [Player] = try dbQueue.read { db in @@ -124,23 +178,6 @@ let players: [Player] = try dbQueue.read { db in
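For a first taste of the query interface that the rest of this README relies on, here is a minimal sketch building on the `Player` record and `dbQueue` from the sample above (the 1000-point threshold and the `bestPlayers` name are arbitrary illustration):

```swift
import GRDB

// The query interface generates the SQL for you, roughly:
// SELECT * FROM player WHERE score >= 1000 ORDER BY score DESC
let bestPlayers: [Player] = try dbQueue.read { db in
    try Player
        .filter(Column("score") >= 1000)
        .order(Column("score").desc)
        .fetchAll(db)
}
```

Such requests compile down to plain SQL, so they mix freely with the raw SQL calls shown later in this document.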
-
- Activate the WAL mode - -```swift -import GRDB - -// Simple database connection -let dbQueue = try DatabaseQueue(path: "/path/to/database.sqlite") - -// Enhanced multithreading based on SQLite's WAL mode -let dbPool = try DatabasePool(path: "/path/to/database.sqlite") -``` - -See [Database Connections](#database-connections) - -
-
Access to raw SQL @@ -308,7 +345,6 @@ See [Database Observation], [Combine Support], [RxGRDB].
- Documentation ============= @@ -318,16 +354,16 @@ Documentation #### Demo Applications & Frequently Asked Questions - [Demo Applications]: Three flavors: vanilla UIKit, Combine + SwiftUI, and Async/Await + SwiftUI. -- [FAQ]: [Opening Connections](#faq-opening-connections), [Associations](#faq-associations), etc. +- [FAQ] #### Reference -- [GRDB Reference](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/) +- 📖 [GRDB Reference](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/) #### Getting Started - [Installation](#installation) -- [Database Connections](#database-connections): Connect to SQLite databases +- [Database Connections]: Connect to SQLite databases #### SQLite and SQL @@ -342,33 +378,29 @@ Documentation - [Migrations]: Transform your database as your application evolves. - [Full-Text Search]: Perform efficient and customizable full-text searches. -- [Joined Queries Support](#joined-queries-support): Consume complex joined queries. - [Database Observation]: Observe database changes and transactions. - [Encryption](#encryption): Encrypt your database with SQLCipher. - [Backup](#backup): Dump the content of a database to another. - [Interrupt a Database](#interrupt-a-database): Abort any pending database operation. -- [Sharing a Database]: Recommendations for App Group Containers and sandboxed macOS apps. +- [Sharing a Database]: How to share an SQLite database between multiple processes - recommendations for App Group containers, App Extensions, App Sandbox, and file coordination. #### Good to Know +- [Concurrency]: How to access databases in a multi-threaded application. +- [Combine](Documentation/Combine.md): Access and observe the database with Combine publishers. - [Avoiding SQL Injection](#avoiding-sql-injection) - [Error Handling](#error-handling) - [Unicode](#unicode) - [Memory Management](#memory-management) -- [Data Protection](#data-protection) -- [Concurrency] - -#### General Guides & Good Practices - -- :bulb: [Good Practices for Designing Record Types](Documentation/GoodPracticesForDesigningRecordTypes.md) +- [Data Protection](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections) - :bulb: [Migrating From GRDB 5 to GRDB 6](Documentation/GRDB6MigrationGuide.md) -- :bulb: [Issues tagged "best practices"](https://github.com/groue/GRDB.swift/issues?q=is%3Aissue+label%3A%22best+practices%22) -- :question: [Issues tagged "question"](https://github.com/groue/GRDB.swift/issues?utf8=✓&q=is%3Aissue%20label%3Aquestion) -- :blue_book: [Why Adopt GRDB?](Documentation/WhyAdoptGRDB.md) -- :blue_book: [How to build an iOS application with SQLite and GRDB.swift](https://medium.com/@gwendal.roue/how-to-build-an-ios-application-with-sqlite-and-grdb-swift-d023a06c29b3) -- :blue_book: [Four different ways to handle SQLite concurrency](https://medium.com/@gwendal.roue/four-different-ways-to-handle-sqlite-concurrency-db3bcc74d00e) -- :blue_book: [Unexpected SQLite with Swift](https://hackernoon.com/unexpected-sqlite-with-swift-ddc6343bcbfc) +- :bulb: [Why Adopt GRDB?](Documentation/WhyAdoptGRDB.md) +- :bulb: [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) + +#### Companion Libraries +- [GRDBQuery](https://github.com/groue/GRDBQuery): Access and observe the database from your SwiftUI views. +- [GRDBSnapshotTesting](https://github.com/groue/GRDBSnapshotTesting): Test your database. 
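Most entries above are links. As a quick taste of the [Migrations] entry, here is a minimal sketch of GRDB's `DatabaseMigrator` (the migration name, table, and columns are arbitrary; `dbQueue` is assumed to be an open connection, as in the Usage section):

```swift
import GRDB

var migrator = DatabaseMigrator()

// Registered migrations run once each, in registration order.
migrator.registerMigration("createPlayer") { db in
    try db.create(table: "player") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("name", .text).notNull()
        t.column("score", .integer).notNull()
    }
}

// Apply all pending migrations.
try migrator.migrate(dbQueue)
```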
**[FAQ]** @@ -384,8 +416,6 @@ See [Encryption](#encryption) for the installation procedure of GRDB with SQLCip See [Custom SQLite builds](Documentation/CustomSQLiteBuilds.md) for the installation procedure of GRDB with a customized build of SQLite. -See [Enabling FTS5 Support](Documentation/FullTextSearch.md#enabling-fts5-support) for the installation procedure of GRDB with support for the FTS5 full-text engine. - ## CocoaPods @@ -444,7 +474,9 @@ The differences are: - Database pools open your SQLite database in the [WAL mode](https://www.sqlite.org/wal.html) (unless read-only). - Database queues support [in-memory databases](https://www.sqlite.org/inmemorydb.html). -**If you are not sure, choose `DatabaseQueue`.** You will always be able to switch to `DatabasePool` later. +**If you are not sure, choose [`DatabaseQueue`].** You will always be able to switch to [`DatabasePool`] later. + +For more information and tips when opening connections, see [Database Connections](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections). SQLite API @@ -462,22 +494,22 @@ - [Date and DateComponents](#date-and-datecomponents) - [NSNumber, NSDecimalNumber, and Decimal](#nsnumber-nsdecimalnumber-and-decimal) - [Swift enums](#swift-enums) - - [Custom Value Types](#custom-value-types) + - [`DatabaseValueConvertible`]: the protocol for custom value types - [Transactions and Savepoints] - [SQL Interpolation] Advanced topics: -- [Prepared Statements](#prepared-statements) +- [Prepared Statements] - [Custom SQL Functions and Aggregates](#custom-sql-functions-and-aggregates) - [Database Schema Introspection](#database-schema-introspection) -- [Row Adapters](#row-adapters) +- [Row Adapters](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter) - [Raw SQLite Pointers](#raw-sqlite-pointers) ## Executing Updates -Once granted with a [database connection](#database-connections), the `execute` method executes the SQL statements that do not return any database row, such as `CREATE TABLE`, `INSERT`, `DELETE`, `ALTER`, etc. +Once granted with a [database connection], the [`execute(sql:arguments:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/execute(sql:arguments:)) method executes the SQL statements that do not return any database row, such as `CREATE TABLE`, `INSERT`, `DELETE`, `ALTER`, etc. For example: @@ -501,14 +533,16 @@ try dbQueue.write { db in } ``` -The `?` and colon-prefixed keys like `:score` in the SQL query are the **statements arguments**. You pass arguments with arrays or dictionaries, as in the example above. See [Values](#values) for more information on supported arguments types (Bool, Int, String, Date, Swift enums, etc.), and [StatementArguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments) for a detailed documentation of SQLite arguments. +The `?` and colon-prefixed keys like `:score` in the SQL query are the **statement arguments**. You pass arguments with arrays or dictionaries, as in the example above. See [Values](#values) for more information on supported argument types (Bool, Int, String, Date, Swift enums, etc.), and [`StatementArguments`] for a detailed documentation of SQLite arguments. -You can also embed query arguments right into your SQL queries, with the `literal` argument label, as in the example below. See [SQL Interpolation] for more details. 
+You can also embed query arguments right into your SQL queries, with [`execute(literal:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/execute(literal:)), as in the example below. See [SQL Interpolation] for more details. ```swift try dbQueue.write { db in + let name = "O'Brien" + let score = 550 try db.execute(literal: """ - INSERT INTO player (name, score) VALUES (\("O'Brien"), \(550)) + INSERT INTO player (name, score) VALUES (\(name), \(score)) """) } ``` @@ -551,9 +585,9 @@ try db.execute(literal: """ """) ``` -When you want to make sure that a single statement is executed, use [Prepared Statements](#prepared-statements). +When you want to make sure that a single statement is executed, use a prepared [`Statement`]. -**After an INSERT statement**, you can get the row ID of the inserted row: +**After an INSERT statement**, you can get the row ID of the inserted row with [`lastInsertedRowID`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/lastinsertedrowid): ```swift try db.execute( @@ -573,7 +607,7 @@ let playerId = player.id ## Fetch Queries -[Database connections](#database-connections) let you fetch database rows, plain values, and custom models aka "records". +[Database connections] let you fetch database rows, plain values, and custom models aka "records". **Rows** are the raw results of SQL queries: @@ -649,32 +683,12 @@ try Row.fetchOne(...) // Row? let count = try Int.fetchOne(db, sql: "SELECT COUNT(*) ...") // Int? ``` -**All those fetching methods require an SQL string that contains a single SQL statement.** When you want to fetch from multiple statements joined with a semicolon, iterate the multiple [prepared statements](#prepared-statements) found in the SQL string: - -```swift -let statements = try db.allStatements(sql: """ - SELECT ...; - SELECT ...; - SELECT ...; - """) -while let statement = try statements.next() { - let players = try Player.fetchAll(statement) -} -``` - -You can join the results of all statements yielded by the `allStatements` method, like the SQLite [`sqlite3_exec`](https://www.sqlite.org/c3ref/exec.html) function: - -```swift -// A single cursor of all rows from all statements -let rows = try db - .allStatements(sql: "...") - .flatMap { statement in try Row.fetchCursor(statement) } -``` - -See [prepared statements](#prepared-statements) for more information about `allStatements()`. +**All those fetching methods require an SQL string that contains a single SQL statement.** When you want to fetch from multiple statements joined with a semicolon, iterate the multiple [prepared statements] found in the SQL string. ### Cursors +📖 [`Cursor`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/cursor) + **Whenever you consume several rows from the database, you can fetch an Array, a Set, or a Cursor**. The `fetchAll()` and `fetchSet()` methods return regular Swift array and sets, that you iterate like all other arrays and sets: @@ -800,6 +814,7 @@ If you don't see, or don't care about the difference, use arrays. 
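To make the array/cursor difference concrete, here is a minimal sketch that fetches the same column both ways (it reuses the `player` table and `dbQueue` from the Usage section; the SQL is illustrative):

```swift
try dbQueue.read { db in
    // Array: all names are decoded and loaded in memory at once,
    // and remain usable after the database access ends.
    let names: [String] = try String.fetchAll(db, sql: "SELECT name FROM player")
    print(names.count)

    // Cursor: names are decoded lazily, one row at a time, as next() is called.
    // Cursors must be consumed inside the database access closure.
    let nameCursor = try String.fetchCursor(db, sql: "SELECT name FROM player")
    while let name = try nameCursor.next() {
        print(name)
    }
}
```

A cursor never loads the full result set in memory, which makes it attractive for large queries, at the cost of a little extra care, as described below for row cursors.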
If you care ab - [Column Values](#column-values) - [DatabaseValue](#databasevalue) - [Rows as Dictionaries](#rows-as-dictionaries) +- 📖 [`Row`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/row) #### Fetching Rows @@ -838,7 +853,7 @@ let rows = try Row.fetchAll(db, arguments: ["name": "Arthur"]) ``` -See [Values](#values) for more information on supported arguments types (Bool, Int, String, Date, Swift enums, etc.), and [StatementArguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments) for a detailed documentation of SQLite arguments. +See [Values](#values) for more information on supported arguments types (Bool, Int, String, Date, Swift enums, etc.), and [`StatementArguments`] for a detailed documentation of SQLite arguments. Unlike row arrays that contain copies of the database rows, row cursors are close to the SQLite metal, and require a little care: @@ -967,9 +982,11 @@ Generally speaking, you can extract the type you need, provided it can be conver #### DatabaseValue -**DatabaseValue is an intermediate type between SQLite and your values, which gives information about the raw value stored in the database.** +📖 [`DatabaseValue`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalue) + +**`DatabaseValue` is an intermediate type between SQLite and your values, which gives information about the raw value stored in the database.** -You get DatabaseValue just like other value types: +You get `DatabaseValue` just like other value types: ```swift let dbValue: DatabaseValue = row[0] @@ -991,7 +1008,7 @@ case .blob(let data): print("Data: \(data)") } ``` -You can extract regular [values](#values) (Bool, Int, String, Date, Swift enums, etc.) from DatabaseValue with the [DatabaseValueConvertible.fromDatabaseValue()](#custom-value-types) method: +You can extract regular [values](#values) (Bool, Int, String, Date, Swift enums, etc.) from `DatabaseValue` with the [fromDatabaseValue()](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible/fromdatabasevalue(_:)-21zzv) method: ```swift let dbValue: DatabaseValue = row["bookCount"] @@ -1077,6 +1094,8 @@ See the documentation of [`Dictionary.init(_:uniquingKeysWith:)`](https://develo ### Value Queries +📖 [`DatabaseValueConvertible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible) + **Instead of rows, you can directly fetch values.** There are many supported [value types](#values) (Bool, Int, String, Date, Swift enums, etc.). Like rows, fetch values as **cursors**, **arrays**, **sets**, or **single** values (see [fetching methods](#fetching-methods)). Values are extracted from the leftmost column of the SQL queries: @@ -1144,7 +1163,7 @@ GRDB ships with built-in support for the following value types: - **Full-Text Patterns**: [FTS3Pattern](Documentation/FullTextSearch.md#fts3pattern) and [FTS5Pattern](Documentation/FullTextSearch.md#fts5pattern). -- Generally speaking, all types that adopt the [DatabaseValueConvertible](#custom-value-types) protocol. +- Generally speaking, all types that adopt the [`DatabaseValueConvertible`] protocol. Values can be used as [statement arguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments): @@ -1303,7 +1322,7 @@ if let row = try Row.fetchOne(db, ...) 
{ } ``` -See also [Codable Records] for more date customization options, and [DatabaseValueConvertible](#custom-value-types) if you want to define a Date-wrapping type with customized database representation. +See also [Codable Records] for more date customization options, and [`DatabaseValueConvertible`] if you want to define a Date-wrapping type with customized database representation. #### DateComponents @@ -1455,168 +1474,6 @@ if dbValue.isNull { ``` -### Custom Value Types - -Conversion to and from the database is based on the `DatabaseValueConvertible` protocol: - -```swift -protocol DatabaseValueConvertible { - /// Returns a value that can be stored in the database. - var databaseValue: DatabaseValue { get } - - /// Returns a value initialized from dbValue, if possible. - static func fromDatabaseValue(_ dbValue: DatabaseValue) -> Self? -} -``` - -All types that adopt this protocol can be used like all other [values](#values) (Bool, Int, String, Date, Swift enums, etc.) - -The `databaseValue` property returns [DatabaseValue](#databasevalue), a type that wraps the five values supported by SQLite: NULL, Int64, Double, String and Data. Since DatabaseValue has no public initializer, use `DatabaseValue.null`, or another type that already adopts the protocol: `1.databaseValue`, `"foo".databaseValue`, etc. Conversion to DatabaseValue *must not* fail. - -The `fromDatabaseValue()` factory method returns an instance of your custom type if the database value contains a suitable value. If the database value does not contain a suitable value, such as "foo" for Date, `fromDatabaseValue` *must* return nil (GRDB will interpret this nil result as a conversion error, and react accordingly). - -Value types that adopt both `DatabaseValueConvertible` and an archival protocol ([Codable, Encodable or Decodable](https://developer.apple.com/documentation/foundation/archives_and_serialization/encoding_and_decoding_custom_types)) are automatically coded and decoded from JSON arrays and objects: - -```swift -// Encoded as a JSON object in the database: -struct Color: Codable, DatabaseValueConvertible { - var r: Double - var g: Double - var b: Double -} -``` - -For such codable value types, GRDB uses the standard [JSONDecoder](https://developer.apple.com/documentation/foundation/jsondecoder) and [JSONEncoder](https://developer.apple.com/documentation/foundation/jsonencoder) from Foundation. By default, Data values are handled with the `.base64` strategy, Date with the `.millisecondsSince1970` strategy, and non conforming floats with the `.throw` strategy. - -In order to customize the JSON format, provide a custom implementation of the `DatabaseValueConvertible` requirements. - -> **Note**: standard sequences such as `Array`, `Set`, or `Dictionary` do not conform to `DatabaseValueConvertible`, even conditionally. You won't be able to directly fetch or store arrays, sets, or dictionaries as JSON database values. You can get free JSON support from these standard types when they are embedded as properties of [Codable Records], though. - - -## Prepared Statements - -**Prepared Statements** let you prepare an SQL query and execute it later, several times if you need, with different arguments. - -```swift -try dbQueue.write { db in - let insertSQL = "INSERT INTO player (name, score) VALUES (:name, :score)" - let insertStatement = try db.makeStatement(sql: insertSQL) - - let selectSQL = "SELECT * FROM player WHERE name = ?" 
- let selectStatement = try db.makeStatement(sql: selectSQL) -} -``` - -The `?` and colon-prefixed keys like `:name` in the SQL query are the statement arguments. You set them with arrays or dictionaries (arguments are actually of type [StatementArguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments), which happens to adopt the ExpressibleByArrayLiteral and ExpressibleByDictionaryLiteral protocols). - -```swift -insertStatement.arguments = ["name": "Arthur", "score": 1000] -selectStatement.arguments = ["Arthur"] -``` - -Alternatively, you can create a prepared statement with [SQL Interpolation]: - -```swift -let insertStatement = try db.makeStatement(literal: "INSERT ...") -let selectStatement = try db.makeStatement(literal: "SELECT ...") -// ~~~~~~~ -``` - -Statements can be executed: - -```swift -try insertStatement.execute() -``` - -Statements can be used wherever a raw SQL query string would fit (see [fetch queries](#fetch-queries)): - -```swift -let rows = try Row.fetchCursor(selectStatement) // A Cursor of Row -let players = try Player.fetchAll(selectStatement) // [Player] -let players = try Player.fetchSet(selectStatement) // Set -let player = try Player.fetchOne(selectStatement) // Player? -``` - -You can set the arguments at the moment of the statement execution: - -```swift -try insertStatement.execute(arguments: ["name": "Arthur", "score": 1000]) -let player = try Player.fetchOne(selectStatement, arguments: ["Arthur"]) -``` - -**When you want to build multiple statements joined with a semicolon**, use the `allStatements` method: - -```swift -let statements = try db.allStatements(sql: """ - INSERT INTO player (name, score) VALUES (?, ?); - INSERT INTO player (name, score) VALUES (?, ?); - """, arguments: ["Arthur", 100, "O'Brien", 1000]) -while let statement = try statements.next() { - try statement.execute() -} -``` - -`allStatements` also supports [SQL Interpolation]: - -```swift -let statements = try db.allStatements(literal: """ - INSERT INTO player (name, score) VALUES (\("Arthur"), \(100)); - INSERT INTO player (name, score) VALUES (\("O'Brien"), \(1000)); - """) -while let statement = try statements.next() { - try statement.execute() -} -``` - -You can turn the [cursor](#cursors) returned from `allStatements` into a regular Swift array, but in this case make sure all individual statements can compile even if the previous ones were not run: - -```swift -// OK: Array of statements -let statements = try Array(db.allStatements(sql: """ - INSERT ...; - UPDATE ...; - SELECT ...; - """)) - -// FAILURE: Can't build an array of statements since -// the INSERT won't compile until CREATE TABLE is run. -let statements = try Array(db.allStatements(sql: """ - CREATE TABLE player ...; - INSERT INTO player ...; - """)) -``` - -See also `Database.execute(sql:)` in the [Executing Updates](#executing-updates) chapter. - -> **Note**: it is a programmer error to reuse a prepared statement that has failed: GRDB may crash if you do so. - -For more information about prepared statements, see the [Statement reference](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement). - - -### Prepared Statements Cache - -When the same query will be used several times in the lifetime of your application, you may feel a natural desire to cache prepared statements. - -**Don't cache statements yourself.** - -> **Note**: This is because you don't have the necessary tools. 
Statements are tied to specific SQLite connections and dispatch queues which you don't manage yourself, especially when you use [database pools]. A change in the database schema [may, or may not](https://www.sqlite.org/compile.html#max_schema_retry) invalidate a statement. - -Instead, use the `cachedStatement` method. GRDB does all the hard caching and [memory management](#memory-management) stuff for you: - -```swift -let statement = try db.cachedStatement(sql: sql) -``` - -Cached statements also support [SQL Interpolation]: - -```swift -let statement = try db.cachedStatement(literal: "INSERT ...") -// ~~~~~~~ -``` - -> **Warning**: Should a cached prepared statement throw an error, don't reuse it (it is a programmer error). Instead, reload one from the cache. - - ## Custom SQL Functions and Aggregates **SQLite lets you define SQL functions and aggregates.** @@ -1634,6 +1491,8 @@ SELECT maxLength(name) FROM player; -- custom aggregate ### Custom SQL Functions +📖 [`DatabaseFunction`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasefunction) + A *function* argument takes an array of [DatabaseValue](#databasevalue), and returns any valid [value](#values) (Bool, Int, String, Date, Swift enums, etc.) The number of database values is guaranteed to be *argumentCount*. SQLite has the opportunity to perform additional optimizations when functions are "pure", which means that their result only depends on their arguments. So make sure to set the *pure* argument to true when possible. @@ -1713,6 +1572,8 @@ Player.select(reverseString(nameColumn)) ### Custom Aggregates +📖 [`DatabaseFunction`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasefunction), [`DatabaseAggregate`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseaggregate) + Before registering a custom aggregate, you need to define a type that adopts the `DatabaseAggregate` protocol: ```swift @@ -1821,190 +1682,7 @@ Database.isSQLiteInternalTable(...) Database.isGRDBInternalTable(...) ``` - -## Row Adapters - -**Row adapters let you present database rows in the way expected by the row consumers.** - -They basically help two incompatible row interfaces to work together. For example, a row consumer expects a column named "consumed", but the produced row has a column named "produced". - -In this case, the `ColumnMapping` row adapter comes in handy: - -```swift -// Turn the 'produced' column into 'consumed': -let adapter = ColumnMapping(["consumed": "produced"]) -let row = try Row.fetchOne(db, sql: "SELECT 'Hello' AS produced", adapter: adapter)! - -// [consumed:"Hello"] -print(row) - -// "Hello" -print(row["consumed"]) - -// ▿ [consumed:"Hello"] -// unadapted: [produced:"Hello"] -print(row.debugDescription) - -// [produced:"Hello"] -print(row.unadapted) -``` - -[Record types](#records) are typical row consumers that expect database rows to have a specific layout so that they can decode them: - -```swift -struct MyRecord: Decodable, FetchableRecord { - var consumed: String -} -let record = try MyRecord.fetchOne(db, sql: "SELECT 'Hello' AS produced", adapter: adapter)! -print(record.consumed) // "Hello" -``` - -There are several situations where row adapters are useful: - -- They help disambiguate columns with identical names, which may happen when you select columns from several tables. See [Joined Queries Support](#joined-queries-support) for an example. - -- They help when SQLite outputs unexpected column names, which may happen with some subqueries. 
See [RenameColumnAdapter](#renamecolumnadapter) for an example. - -Available row adapters are described below. - -- [ColumnMapping](#columnmapping) -- [EmptyRowAdapter](#emptyrowadapter) -- [RangeRowAdapter](#rangerowadapter) -- [RenameColumnAdapter](#renamecolumnadapter) -- [ScopeAdapter](#scopeadapter) -- [SuffixRowAdapter](#suffixrowadapter) - - -### ColumnMapping - -`ColumnMapping` renames columns. Build one with a dictionary whose keys are adapted column names, and values the column names in the raw row: - -```swift -// [newA:0, newB:1] -let adapter = ColumnMapping(["newA": "a", "newB": "b"]) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c", adapter: adapter)! -``` - -Note that columns that are not present in the dictionary are not present in the resulting adapted row. - - -### EmptyRowAdapter - -`EmptyRowAdapter` hides all columns. - -```swift -let adapter = EmptyRowAdapter() -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c", adapter: adapter)! -row.isEmpty // true -``` - -This limit adapter may turn out useful in some narrow use cases. You'll be happy to find it when you need it. - - -### RangeRowAdapter - -`RangeRowAdapter` only exposes a range of columns. - -```swift -// [b:1] -let adapter = RangeRowAdapter(1..<2) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c", adapter: adapter)! -``` - - -### RenameColumnAdapter - -`RenameColumnAdapter` lets you transform column names with a function: - -```swift -// [arrr:0, brrr:1, crrr:2] -let adapter = RenameColumnAdapter { column in column + "rrr" } -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c", adapter: adapter)! -``` - -This adapter may turn out useful, for example, when subqueries contain duplicated column names: - -```swift -let sql = "SELECT * FROM (SELECT 1 AS id, 2 AS id)" - -// Prints ["id", "id:1"] -// Note the "id:1" column, generated by SQLite. -let row = try Row.fetchOne(db, sql: sql)! -print(Array(row.columnNames)) - -// Drop the `:...` suffix, and prints ["id", "id"] -let adapter = RenameColumnAdapter { String($0.prefix(while: { $0 != ":" })) } -let adaptedRow = try Row.fetchOne(db, sql: sql, adapter: adapter)! -print(Array(adaptedRow.columnNames)) -``` - - -### ScopeAdapter - -`ScopeAdapter` defines *row scopes*: - -```swift -let adapter = ScopeAdapter([ - "left": RangeRowAdapter(0..<2), - "right": RangeRowAdapter(2..<4)]) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d", adapter: adapter)! -``` - -ScopeAdapter does not change the columns and values of the fetched row. Instead, it defines *scopes*, which you access through the `Row.scopes` property: - -```swift -row // [a:0 b:1 c:2 d:3] -row.scopes["left"] // [a:0 b:1] -row.scopes["right"] // [c:2 d:3] -row.scopes["missing"] // nil -``` - -Scopes can be nested: - -```swift -let adapter = ScopeAdapter([ - "left": ScopeAdapter([ - "left": RangeRowAdapter(0..<1), - "right": RangeRowAdapter(1..<2)]), - "right": ScopeAdapter([ - "left": RangeRowAdapter(2..<3), - "right": RangeRowAdapter(3..<4)]) - ]) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d", adapter: adapter)! - -let leftRow = row.scopes["left"]! -leftRow.scopes["left"] // [a:0] -leftRow.scopes["right"] // [b:1] - -let rightRow = row.scopes["right"]! 
-rightRow.scopes["left"] // [c:2] -rightRow.scopes["right"] // [d:3] -``` - -Any adapter can be extended with scopes: - -```swift -let baseAdapter = RangeRowAdapter(0..<2) -let adapter = ScopeAdapter(base: baseAdapter, scopes: [ - "remainder": SuffixRowAdapter(fromIndex: 2)]) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c, 3 AS d", adapter: adapter)! - -row // [a:0 b:1] -row.scopes["remainder"] // [c:2 d:3] -``` - -To see how `ScopeAdapter` can be used, see [Joined Queries Support](#joined-queries-support). - - -### SuffixRowAdapter - -`SuffixRowAdapter` hides the first columns in a row: - -```swift -// [b:1 c:2] -let adapter = SuffixRowAdapter(fromIndex: 1) -let row = try Row.fetchOne(db, sql: "SELECT 0 AS a, 1 AS b, 2 AS c", adapter: adapter)! -``` +For more information, see [`tableExists(_:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/tableexists(_:)) and related methods. ## Raw SQLite Pointers @@ -2053,7 +1731,7 @@ try dbQueue.write { db in } ``` -Of course, you need to open a [database connection](#database-connections), and [create database tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema) first. +Of course, you need to open a [database connection], and [create database tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema) first. To define your custom records, you subclass the ready-made `Record` class, or you extend your structs and classes with protocols that come with focused sets of features: fetching methods, persistence methods, record comparison... @@ -2061,7 +1739,7 @@ Extending structs with record protocols is more "swifty". Subclassing the Record > **Note**: if you are familiar with Core Data's NSManagedObject or Realm's Object, you may experience a cultural shock: GRDB records are not uniqued, do not auto-update, and do not lazy-load. This is both a purpose, and a consequence of protocol-oriented programming. You should read [How to build an iOS application with SQLite and GRDB.swift](https://medium.com/@gwendal.roue/how-to-build-an-ios-application-with-sqlite-and-grdb-swift-d023a06c29b3) for a general introduction. > -> :bulb: **Tip**: after you have read this chapter, check the [Good Practices for Designing Record Types](Documentation/GoodPracticesForDesigningRecordTypes.md) Guide. +> :bulb: **Tip**: after you have read this chapter, check the [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) Guide. > > :bulb: **Tip**: see the [Demo Applications] for sample apps that uses records. @@ -2080,13 +1758,14 @@ Extending structs with record protocols is more "swifty". 
Subclassing the Record - [TableRecord Protocol](#tablerecord-protocol) - [PersistableRecord Protocol](#persistablerecord-protocol) - [Persistence Methods] - - [Persistence Methods and the `RETURNING` clause](#persistence-methods-and-the-returning-clause) + - [Persistence Methods and the `RETURNING` clause] - [Persistence Callbacks] - [Identifiable Records] - [Codable Records] - [Record Class](#record-class) - [Record Comparison] - [Record Customization Options] +- [Record Timestamps and Transaction Date] **Records in a Glance** @@ -2266,6 +1945,8 @@ Details follow: ## FetchableRecord Protocol +📖 [`FetchableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/fetchablerecord) + **The FetchableRecord protocol grants fetching methods to any type** that can be built from a database row: ```swift @@ -2335,7 +2016,7 @@ try Place.fetchSet(db, sql: "SELECT ...", arguments:...) // Set try Place.fetchOne(db, sql: "SELECT ...", arguments:...) // Place? ``` -See [fetching methods](#fetching-methods) for information about the `fetchCursor`, `fetchAll`, `fetchSet` and `fetchOne` methods. See [StatementArguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments) for more information about the query arguments. +See [fetching methods](#fetching-methods) for information about the `fetchCursor`, `fetchAll`, `fetchSet` and `fetchOne` methods. See [`StatementArguments`] for more information about the query arguments. > **Note**: for performance reasons, the same row argument to `init(row:)` is reused during the iteration of a fetch query. If you want to keep the row for later use, make sure to store a copy: `self.row = row.copy()`. @@ -2344,6 +2025,8 @@ See [fetching methods](#fetching-methods) for information about the `fetchCursor ## TableRecord Protocol +📖 [`TableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord) + **The TableRecord protocol** generates SQL for you. 
To use TableRecord, subclass the [Record](#record-class) class, or adopt it explicitly: ```swift @@ -2400,6 +2083,8 @@ TableRecord can also fetch deal with primary and unique keys: see [Fetching by K ## PersistableRecord Protocol +📖 [`EncodableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/encodablerecord), [`MutablePersistableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/mutablepersistablerecord), [`PersistableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/persistablerecord) + **GRDB record types can create, update, and delete rows in the database.** Those abilities are granted by three protocols: @@ -2710,7 +2395,7 @@ try dbQueue.write { db in } ``` -For extra precision, you can select only the columns you need, and fetch the desired value from the provided [prepared statement](#prepared-statements): +For extra precision, you can select only the columns you need, and fetch the desired value from the provided prepared [`Statement`]: ```swift try dbQueue.write { db in @@ -2880,7 +2565,7 @@ struct Player: Identifiable, FetchableRecord, PersistableRecord { } ``` -When `id` has a [database-compatible type](#values) (Int64, Int, String, UUID, ...), the `Identifiable` conformance unlocks type-safe record and request methods: +When `id` has a [database-compatible type](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible) (Int64, Int, String, UUID, ...), the `Identifiable` conformance unlocks type-safe record and request methods: ```swift let player = try Player.find(db, id: 1) // Player @@ -2898,7 +2583,7 @@ try Player.deleteAll(db, ids: [1, 2, 3]) > **Note**: `Identifiable` is not available on all application targets, and not all tables have a single-column primary key. GRDB provides other methods that deal with primary and unique keys, but they won't check the type of their arguments: > > ```swift -> // Those methods are not type-checked +> // Available on non-Identifiable types > try Player.fetchOne(db, key: 1) > try Player.fetchOne(db, key: ["email": "arthur@example.com"]) > try Country.fetchAll(db, keys: ["FR", "US"]) @@ -2911,6 +2596,26 @@ try Player.deleteAll(db, ids: [1, 2, 3]) > try Player.deleteAll(db, keys: [1, 2, 3]) > ``` +> **Note**: It is not recommended to use `Identifiable` on record types that use an auto-incremented primary key: +> +> ```swift +> // AVOID declaring Identifiable conformance when key is auto-incremented +> struct Player { +> var id: Int64? // Not an id suitable for Identifiable +> var name: String +> var score: Int +> } +> +> extension Player: FetchableRecord, MutablePersistableRecord { +> // Update auto-incremented id upon successful insertion +> mutating func didInsert(_ inserted: InsertionSuccess) { +> id = inserted.rowID +> } +> } +> ``` +> +> For a detailed rationale, please see [issue #1435](https://github.com/groue/GRDB.swift/issues/1435#issuecomment-1740857712). + Some database tables have a single-column primary key which is not called "id": ```swift @@ -2959,7 +2664,7 @@ try dbQueue.write { db in Codable records encode and decode their properties according to their own implementation of the Encodable and Decodable protocols. Yet databases have specific requirements: -- Properties are always coded according to their preferred database representation, when they have one (all [values](#values) that adopt the [DatabaseValueConvertible](#custom-value-types) protocol). 
+- Properties are always coded according to their preferred database representation, when they have one (all [values](#values) that adopt the [`DatabaseValueConvertible`] protocol). - You can customize the encoding and decoding of dates and uuids. - Complex properties (arrays, dictionaries, nested structs, etc.) are stored as JSON. @@ -2967,7 +2672,7 @@ For more information about Codable records, see: - [JSON Columns] - [Column Names Coding Strategies] -- [Date and UUID Coding Strategies] +- [Data, Date, and UUID Coding Strategies] - [The userInfo Dictionary] - [Tip: Derive Columns from Coding Keys](#tip-derive-columns-from-coding-keys) @@ -3042,9 +2747,9 @@ protocol EncodableRecord { See [DatabaseColumnDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasecolumndecodingstrategy) and [DatabaseColumnEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasecolumnencodingstrategy/) to learn about all available strategies. -### Date and UUID Coding Strategies +### Data, Date, and UUID Coding Strategies -By default, [Codable Records] encode and decode their Date and UUID properties as described in the general [Date and DateComponents](#date-and-datecomponents) and [UUID](#uuid) chapters. +By default, [Codable Records] encode and decode their Data properties as blobs, and Date and UUID properties as described in the general [Date and DateComponents](#date-and-datecomponents) and [UUID](#uuid) chapters. To sum up: dates encode themselves in the "YYYY-MM-DD HH:MM:SS.SSS" format, in the UTC time zone, and decode a variety of date formats and timestamps. UUIDs encode themselves as 16-bytes data blobs, and decode both 16-bytes data blobs and strings such as "E621E1F8-C36C-495A-93FC-0C247A3E6E5F". @@ -3052,27 +2757,29 @@ Those behaviors can be overridden: ```swift protocol FetchableRecord { + static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { get } static var databaseDateDecodingStrategy: DatabaseDateDecodingStrategy { get } } protocol EncodableRecord { + static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { get } static var databaseDateEncodingStrategy: DatabaseDateEncodingStrategy { get } static var databaseUUIDEncodingStrategy: DatabaseUUIDEncodingStrategy { get } } ``` -See [DatabaseDateDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedatedecodingstrategy/), [DatabaseDateEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedateencodingstrategy/), and [DatabaseUUIDEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseuuidencodingstrategy/) to learn about all available strategies. +See [DatabaseDataDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedatadecodingstrategy/), [DatabaseDateDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedatedecodingstrategy/), [DatabaseDataEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedataencodingstrategy/), [DatabaseDateEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedateencodingstrategy/), and [DatabaseUUIDEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseuuidencodingstrategy/) to learn about all available strategies. 
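For example, here is a sketch of a record type that stores its dates as Unix timestamps, and its uuids as lowercase strings (the `Player` properties are illustrative; the strategies are the GRDB-provided `.timeIntervalSince1970` and `.lowercaseString`):

```swift
import GRDB

struct Player: Codable, FetchableRecord, PersistableRecord {
    var id: UUID
    var name: String
    var registeredAt: Date

    // Encode and decode dates as numeric Unix timestamps (stored as DOUBLE):
    static let databaseDateEncodingStrategy = DatabaseDateEncodingStrategy.timeIntervalSince1970
    static let databaseDateDecodingStrategy = DatabaseDateDecodingStrategy.timeIntervalSince1970

    // Encode uuids as lowercase strings (stored as TEXT) instead of 16-bytes data blobs:
    static let databaseUUIDEncodingStrategy = DatabaseUUIDEncodingStrategy.lowercaseString
}
```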
There is no customization of uuid decoding, because UUID can already decode all its encoded variants (16-bytes blobs and uuid strings, both uppercase and lowercase). -Customized date and uuid handling apply: +Customized coding strategies apply: - When encoding and decoding database rows to and from records (fetching and persistence methods). - In requests by single-column primary key: `fetchOne(_:id:)`, `filter(id:)`, `deleteAll(_:keys:)`, etc. -*They do not apply* in other requests based on date or uuid values. +*They do not apply* in other requests based on data, date, or uuid values. -So make sure that dates and uuids are properly encoded in your requests. For example: +So make sure that those are properly encoded in your requests. For example: ```swift struct Player: Codable, FetchableRecord, PersistableRecord, Identifiable { @@ -3176,7 +2883,7 @@ extension Player: FetchableRecord, PersistableRecord { } ``` -See the [query interface](#the-query-interface) and [Good Practices for Designing Record Types](Documentation/GoodPracticesForDesigningRecordTypes.md) for further information. +See the [query interface](#the-query-interface) and [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) for further information. ## Record Class @@ -3845,11 +3552,11 @@ This is the list of record methods, along with their required protocols. The [Re | **[Codable Records]** | | | | `Type.databaseDecodingUserInfo` | [FetchableRecord] | [*](#the-userinfo-dictionary) | | `Type.databaseJSONDecoder(for:)` | [FetchableRecord] | [*](#json-columns) | -| `Type.databaseDateDecodingStrategy` | [FetchableRecord] | [*](#date-and-uuid-coding-strategies) | +| `Type.databaseDateDecodingStrategy` | [FetchableRecord] | [*](#data-date-and-uuid-coding-strategies) | | `Type.databaseEncodingUserInfo` | [EncodableRecord] | [*](#the-userinfo-dictionary) | | `Type.databaseJSONEncoder(for:)` | [EncodableRecord] | [*](#json-columns) | -| `Type.databaseDateEncodingStrategy` | [EncodableRecord] | [*](#date-and-uuid-coding-strategies) | -| `Type.databaseUUIDEncodingStrategy` | [EncodableRecord] | [*](#date-and-uuid-coding-strategies) | +| `Type.databaseDateEncodingStrategy` | [EncodableRecord] | [*](#data-date-and-uuid-coding-strategies) | +| `Type.databaseUUIDEncodingStrategy` | [EncodableRecord] | [*](#data-date-and-uuid-coding-strategies) | | **Define [Associations]** | | | | `Type.belongsTo(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) | | `Type.hasMany(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) | @@ -3911,7 +3618,7 @@ let count = try request.fetchCount(db) // Int let player = try Player.fetchOne(db, sql: "SELECT * FROM player WHERE id = ?", arguments: [1]) // Player? ``` - See [Prepared Statements](#prepared-statements): + See [`Statement`]: ```swift let statement = try db.makeStatement(sql: "SELECT * FROM player WHERE id = ?") @@ -3952,7 +3659,7 @@ try dbQueue.write { db in } ``` -You need to open a [database connection](#database-connections) before you can query the database. +You need to open a [database connection] before you can query the database. Please bear in mind that the query interface can not generate all possible SQL queries. You may also *prefer* writing SQL, and this is just OK. From little snippets to full queries, your SQL skills are welcome: @@ -4002,6 +3709,8 @@ So don't miss the [SQL API](#sqlite-api). 
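For instance, you can wrap raw SQL in a dedicated request method, and keep fetching records as usual. Here is a sketch (the `bestPlayers(minimumScore:)` method and the `score` column are illustrative assumptions, not GRDB API):

```swift
import GRDB

extension Player {
    /// SELECT * FROM player WHERE score >= ? ORDER BY score DESC
    static func bestPlayers(minimumScore: Int) -> SQLRequest<Player> {
        "SELECT * FROM player WHERE score >= \(minimumScore) ORDER BY score DESC"
    }
}

let bestPlayers = try dbQueue.read { db in
    try Player.bestPlayers(minimumScore: 1000).fetchAll(db)
}
```

Thanks to [SQL Interpolation], `\(minimumScore)` becomes a statement argument rather than raw SQL, so such requests are safe from SQL injection.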
## Requests +📖 [`QueryInterfaceRequest`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest), [`Table`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/table) + **The query interface requests** let you fetch values from the database: ```swift @@ -4056,7 +3765,7 @@ enum Columns: String, ColumnExpression { You can now build requests with the following methods: `all`, `none`, `select`, `distinct`, `filter`, `matching`, `group`, `having`, `order`, `reversed`, `limit`, `joining`, `including`, `with`. All those methods return another request, which you can further refine by applying another method: `Player.select(...).filter(...).order(...)`. -- `all()`, `none()`: the requests for all rows, or no row. +- [`all()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord/all()), [`none()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord/none()): the requests for all rows, or no row. ```swift // SELECT * FROM player @@ -4065,21 +3774,21 @@ You can now build requests with the following methods: `all`, `none`, `select`, By default, all columns are selected. See [Columns Selected by a Request]. -- `select(...)` and `select(..., as:)` define the selected columns. See [Columns Selected by a Request]. +- [`select(...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/selectionrequest/select(_:)-30yzl) and [`select(..., as:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/select(_:as:)-282xc) define the selected columns. See [Columns Selected by a Request]. ```swift // SELECT name FROM player Player.select(nameColumn, as: String.self) ``` -- `annotated(with: expression...)` extends the selection. +- [`annotated(with: expression...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/selectionrequest/annotated(with:)-6ehs4) extends the selection. ```swift // SELECT *, (score + bonus) AS total FROM player Player.annotated(with: (scoreColumn + bonusColumn).forKey("total")) ``` -- `annotated(with: aggregate)` extends the selection with [association aggregates](Documentation/AssociationsBasics.md#association-aggregates). +- [`annotated(with: aggregate)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/annotated(with:)-74xfs) extends the selection with [association aggregates](Documentation/AssociationsBasics.md#association-aggregates). ```swift // SELECT team.*, COUNT(DISTINCT player.id) AS playerCount @@ -4089,7 +3798,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Team.annotated(with: Team.players.count) ``` -- `annotated(withRequired: association)` and `annotated(withOptional: association)` extends the selection with [Associations]. +- [`annotated(withRequired: association)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/annotated(withrequired:)) and [`annotated(withOptional: association)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/annotated(withoptional:)) extends the selection with [Associations]. ```swift // SELECT player.*, team.color @@ -4098,14 +3807,14 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.annotated(withRequired: Player.team.select(colorColumn)) ``` -- `distinct()` performs uniquing. +- [`distinct()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/distinct()) performs uniquing. 
```swift // SELECT DISTINCT name FROM player Player.select(nameColumn, as: String.self).distinct() ``` -- `filter(expression)` applies conditions. +- [`filter(expression)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/filteredrequest/filter(_:)) applies conditions. ```swift // SELECT * FROM player WHERE id IN (1, 2, 3) @@ -4115,7 +3824,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.filter(nameColumn != nil && heightColumn > 1.75) ``` -- `filter(id:)` and `filter(ids:)` are type-safe methods available on [Identifiable Records]: +- [`filter(id:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(id:)) and [`filter(ids:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(ids:)) are type-safe methods available on [Identifiable Records]: ```swift // SELECT * FROM player WHERE id = 1 @@ -4125,7 +3834,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Country.filter(ids: ["FR", "US"]) ``` -- `filter(key:)` and `filter(keys:)` apply conditions on primary and unique keys: +- [`filter(key:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(key:)-1p9sq) and [`filter(keys:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(keys:)-6ggt1) apply conditions on primary and unique keys: ```swift // SELECT * FROM player WHERE id = 1 @@ -4141,7 +3850,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.filter(key: ["email": "arthur@example.com"]) ``` -- `matching(pattern)` performs [full-text search](Documentation/FullTextSearch.md). +- `matching(pattern)` ([FTS3](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/matching(_:)-3s3zr), [FTS5](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/matching(_:)-7c1e8)) performs [full-text search](Documentation/FullTextSearch.md). ```swift // SELECT * FROM document WHERE document MATCH 'sqlite database' @@ -4151,7 +3860,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, When the pattern is nil, no row will match. -- `group(expression, ...)` groups rows. +- [`group(expression, ...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/aggregatingrequest/group(_:)-edak) groups rows. ```swift // SELECT name, MAX(score) FROM player GROUP BY name @@ -4160,7 +3869,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, .group(nameColumn) ``` -- `having(expression)` applies conditions on grouped rows. +- [`having(expression)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/aggregatingrequest/having(_:)) applies conditions on grouped rows. ```swift // SELECT team, MAX(score) FROM player GROUP BY team HAVING MIN(score) >= 1000 @@ -4170,7 +3879,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, .having(min(scoreColumn) >= 1000) ``` -- `having(aggregate)` applies conditions on grouped rows, according to an [association aggregate](Documentation/AssociationsBasics.md#association-aggregates). +- [`having(aggregate)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/having(_:)) applies conditions on grouped rows, according to an [association aggregate](Documentation/AssociationsBasics.md#association-aggregates). 
```swift // SELECT team.* @@ -4181,7 +3890,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Team.having(Team.players.count >= 5) ``` -- `order(ordering, ...)` sorts. +- [`order(ordering, ...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/orderedrequest/order(_:)-63rzl) sorts. ```swift // SELECT * FROM player ORDER BY name @@ -4205,7 +3914,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.order(scoreColumn).order(nameColumn) ``` -- `reversed()` reverses the eventual orderings. +- [`reversed()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/orderedrequest/reversed()) reverses the eventual orderings. ```swift // SELECT * FROM player ORDER BY score ASC, name DESC @@ -4219,7 +3928,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.all().reversed() ``` -- `limit(limit, offset: offset)` limits and pages results. +- [`limit(limit, offset: offset)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/limit(_:offset:)) limits and pages results. ```swift // SELECT * FROM player LIMIT 5 @@ -4229,7 +3938,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.limit(5, offset: 10) ``` -- `joining(...)` and `including(...)` fetch and join records through [Associations]. +- [`joining(required:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/joining(required:)), [`joining(optional:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/joining(optional:)), [`including(required:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(required:)), [`including(optional:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(optional:)), and [`including(all:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(all:)) fetch and join records through [Associations]. ```swift // SELECT player.*, team.* @@ -4238,7 +3947,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Player.including(required: Player.team) ``` -- `with(cte)` embeds a [common table expression]: +- [`with(cte)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/with(_:)) embeds a [common table expression]: ```swift // WITH ... SELECT * FROM player @@ -4248,7 +3957,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, - Other requests that involve the primary key: - - `selectPrimaryKey(as:)` selects the primary key. + - [`selectPrimaryKey(as:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/selectprimarykey(as:)) selects the primary key. ```swift // SELECT id FROM player @@ -4261,7 +3970,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Citizenship.selectPrimaryKey(as: Row.self) // QueryInterfaceRequest ``` - - `orderByPrimaryKey()` sorts by primary key. + - [`orderByPrimaryKey()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/orderbyprimarykey()) sorts by primary key. ```swift // SELECT * FROM player ORDER BY id @@ -4274,7 +3983,7 @@ You can now build requests with the following methods: `all`, `none`, `select`, Citizenship.orderByPrimaryKey() ``` - - `groupByPrimaryKey()` groups rows by primary key. 
+ - [`groupByPrimaryKey()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/groupbyprimarykey()) groups rows by primary key. You can refine requests by chaining those methods: @@ -4365,6 +4074,8 @@ Feed [requests](#requests) with SQL expressions built from your Swift code: ### SQL Operators +📖 [`SQLSpecificExpressible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/sqlspecificexpressible) + GRDB comes with a Swift version of many SQLite [built-in operators](https://sqlite.org/lang_expr.html#operators), listed below. But not all: see [Embedding SQL in Query Interface Requests] for a way to add support for missing SQL operators. - `=`, `<>`, `<`, `<=`, `>`, `>=`, `IS`, `IS NOT` @@ -4425,6 +4136,15 @@ GRDB comes with a Swift version of many SQLite [built-in operators](https://sqli When the sequence is empty, `joined(operator: .add)` returns 0, and `joined(operator: .multiply)` returns 1. +- `&`, `|`, `~`, `<<`, `>>` + + Bitwise operations (bitwise and, or, not, left shift, right shift) are derived from their Swift equivalent: + + ```swift + // SELECT mask & 2 AS isRocky FROM planet + Planet.select((Column("mask") & 2).forKey("isRocky")) + ``` + - `||` Concatenate several strings: @@ -4624,6 +4344,8 @@ GRDB comes with a Swift version of many SQLite [built-in operators](https://sqli ### SQL Functions +📖 [`SQLSpecificExpressible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/sqlspecificexpressible) + GRDB comes with a Swift version of many SQLite [built-in functions](https://sqlite.org/lang_corefunc.html), listed below. But not all: see [Embedding SQL in Query Interface Requests] for a way to add support for missing SQL functions. - `ABS`, `AVG`, `COUNT`, `DATETIME`, `JULIANDAY`, `LENGTH`, `MAX`, `MIN`, `SUM`, `TOTAL`: @@ -4646,6 +4368,17 @@ GRDB comes with a Swift version of many SQLite [built-in functions](https://sqli For more information about the functions `dateTime` and `julianDay`, see [Date And Time Functions](https://www.sqlite.org/lang_datefunc.html). +- `CAST` + + Use the `cast` Swift function: + + ```swift + // SELECT (CAST(wins AS REAL) / games) AS successRate FROM player + Player.select((cast(winsColumn, as: .real) / gamesColumn).forKey("successRate")) + ``` + + See [CAST expressions](https://www.sqlite.org/lang_expr.html#castexpr) for more information about SQLite conversions. + - `IFNULL` Use the Swift `??` operator: @@ -5188,7 +4921,7 @@ try Player.customRequest().fetchAll(db) // [Player] } ``` -- The `asRequest(of:)` method changes the type fetched by the request. It is useful, for example, when you use [Associations]: +- The [`asRequest(of:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/asrequest(of:)) method changes the type fetched by the request. It is useful, for example, when you use [Associations]: ```swift struct BookInfo: FetchableRecord, Decodable { @@ -5204,311 +4937,7 @@ try Player.customRequest().fetchAll(db) // [Player] try request.fetchAll(db) ``` -- The `adapted(_:)` method eases the consumption of complex rows with [row adapters](#row-adapters). See [Joined Queries Support](#joined-queries-support) for some sample code that uses this method. - - -## Joined Queries Support - -**GRDB helps consuming joined queries with complex selection.** - -In this chapter, we will focus on the extraction of information from complex rows, such as the ones fetched by the query below: - -```sql --- How to consume the left, middle, and right parts of those rows? 
-SELECT player.*, team.*, MAX(round.score) AS maxScore -FROM player -LEFT JOIN team ON ... -LEFT JOIN round ON ... -GROUP BY ... -``` - -We will not talk about the *generation* of joined queries, which is covered in [Associations]. - -**So what are we talking about?** - -It is difficult to consume rows fetched from complex joined queries, because they often contain several columns with the same name: `id` from table `player`, `id` from table `team`, etc. - -When such ambiguity happens, GRDB row accessors always favor the leftmost matching column. This means that `row["id"]` would give a player id, without any obvious way to access the team id. - -A classical technique to avoid this ambiguity is to give each column a unique name. For example: - -```sql --- A classical technique -SELECT player.id AS player_id, player.name AS player_name, team.id AS team_id, team.name AS team_name, team.color AS team_color, MAX(round.score) AS maxScore -FROM player -LEFT JOIN team ON ... -LEFT JOIN round ON ... -GROUP BY ... -``` - -This technique works pretty well, but it has three drawbacks: - -1. The selection becomes hard to read and understand. -2. Such queries are difficult to write by hand. -3. The mangled names are a *very* bad fit for [FetchableRecord] types that expect specific column names. After all, if the `Team` record type can read `SELECT * FROM team ...`, it should be able to read `SELECT ..., team.*, ...` as well. - -We thus need another technique. **Below we'll see how to split rows into slices, and preserve column names.** - -`SELECT player.*, team.*, MAX(round.score) AS maxScore FROM ...` will be split into three slices: one that contains player's columns, one that contains team's columns, and a remaining slice that contains remaining column(s). The Player record type will be able to read the first slice, which contains the columns expected by the `Player.init(row:)` initializer. In the same way, the Team record type could read the second slice. - -Unlike the name-mangling technique, splitting rows keeps SQL legible, accepts your hand-crafted SQL queries, and plays as nicely as possible with your existing [record types](#records). - -- [Splitting Rows, an Introduction](#splitting-rows-an-introduction) -- [Splitting Rows, the Record Way](#splitting-rows-the-record-way) -- [Splitting Rows, the Codable Way](#splitting-rows-the-codable-way) - - -### Splitting Rows, an Introduction - -Let's first write some introductory code, hoping that this chapter will make you understand how pieces fall together. We'll see [later](#splitting-rows-the-record-way) how records will help us streamline the initial approach, how to track changes in joined requests, and how we can use the standard Decodable protocol. - -To split rows, we will use [row adapters](#row-adapters). Row adapters adapt rows so that row consumers see exactly the columns they want. Among other things, row adapters can define several *row scopes* that give access to as many *row slices*. Sounds like a perfect match. - -At the very beginning, there is an SQL query: - -```swift -try dbQueue.read { db in - let sql = """ - SELECT player.*, team.*, MAX(round.score) AS maxScore - FROM player - LEFT JOIN team ON ... - LEFT JOIN round ON ... - GROUP BY ... - """ -``` - -We need an adapter that extracts player columns, in a slice that has as many columns as there are columns in the player table. That's [RangeRowAdapter](#rangerowadapter): - -```swift - // SELECT player.*, team.*, ... 
- // <------> - let playerWidth = try db.columns(in: "player").count - let playerAdapter = RangeRowAdapter(0 ..< playerWidth) -``` - -We also need an adapter that extracts team columns: - -```swift - // SELECT player.*, team.*, ... - // <----> - let teamWidth = try db.columns(in: "team").count - let teamAdapter = RangeRowAdapter(playerWidth ..< (playerWidth + teamWidth)) -``` - -We merge those two adapters in a single [ScopeAdapter](#scopeadapter) that will allow us to access both sliced rows: - -```swift - let playerScope = "player" - let teamScope = "team" - let adapter = ScopeAdapter([ - playerScope: playerAdapter, - teamScope: teamAdapter]) -``` - -And now we can fetch, and start consuming our rows. You already know [row cursors](#fetching-rows): - -```swift - let rows = try Row.fetchCursor(db, sql: sql, adapter: adapter) - while let row = try rows.next() { -``` - -From a fetched row, we can build a player: - -```swift - let player: Player = row[playerScope] -``` - -In the SQL query, the team is joined with the `LEFT JOIN` operator. This means that the team may be missing: its slice may contain team values, or it may only contain NULLs. When this happens, we don't want to build a Team record, and we thus load an *optional* Team: - -```swift - let team: Team? = row[teamScope] -``` - -And finally, we can load the maximum score, assuming that the "maxScore" column is not ambiguous: - -```swift - let maxScore: Int = row["maxScore"] - - print("player: \(player)") - print("team: \(team)") - print("maxScore: \(maxScore)") - } -} -``` - -> :bulb: In this chapter, we have learned: -> -> - how to use `RangeRowAdapter` to extract a specific table's columns into a *row slice*. -> - how to use `ScopeAdapter` to gives access to several row slices through named scopes. -> - how to use Row subscripting to extract records from rows, or optional records in order to deal with left joins. - - -### Splitting Rows, the Record Way - -Our introduction above has introduced important techniques. It uses [row adapters](#row-adapters) in order to split rows. It uses Row subscripting in order to extract records from row slices. - -But we may want to make it more usable and robust: - -1. It's generally easier to consume records than raw rows. -2. Joined records not always need all columns from a table (see `TableRecord.databaseSelection` in [Columns Selected by a Request]). -3. Building row adapters is long and error prone. - -To address the first bullet, let's define a record that holds our player, optional team, and maximum score. Since it can decode database rows, it adopts the [FetchableRecord] protocol: - -```swift -struct PlayerInfo { - var player: Player - var team: Team? 
- var maxScore: Int -} - -/// PlayerInfo can decode rows: -extension PlayerInfo: FetchableRecord { - private enum Scopes { - static let player = "player" - static let team = "team" - } - - init(row: Row) { - player = row[Scopes.player] - team = row[Scopes.team] - maxScore = row["maxScore"] - } -} -``` - -Now we write a method that returns a [custom request](#custom-requests), and then build the fetching method on top of that request: - -```swift -extension PlayerInfo { - /// The request for all player infos - static func all() -> some FetchRequest { -``` - -To acknowledge that both Player and Team records may customize their selection of the "player" and "team" columns, we'll write our SQL in a slightly different way: - -```swift - // Let Player and Team customize their selection: - let request: SQLRequest = """ - SELECT - \(columnsOf: Player.self), -- instead of player.* - \(columnsOf: Team.self), -- instead of team.* - MAX(round.score) AS maxScore - FROM player - LEFT JOIN team ON ... - LEFT JOIN round ON ... - GROUP BY ... - """ -``` - -Our SQL is no longer a regular String, but an `SQLRequest` which profits from [SQL Interpolation]. Inside this request, `\(columnsOf: Player.self)` outputs `player.*`, unless Player defines a [customized selection](#columns-selected-by-a-request). - -Now we need to build adapters. - -We use the `splittingRowAdapters` global function, whose job is precisely to build row adapters of desired widths: - -And since counting table columns require a database connection, we use the `adapted(_:)` request method. It allows requests to adapt themselves right before execution, when a database connection is available. - -```swift - return request.adapted { db in - let adapters = try splittingRowAdapters(columnCounts: [ - Player.numberOfSelectedColumns(db), - Team.numberOfSelectedColumns(db)]) - return ScopeAdapter([ - Scopes.player: adapters[0], - Scopes.team: adapters[1]]) - } - } -``` - -> **Note**: `splittingRowAdapters` returns as many adapters as necessary to fully split a row. In the example above, it returns *three* adapters: one for player, one for team, and one for the remaining columns. - -And finally, we can define the fetching method: - -```swift - /// Fetches all player infos - static func fetchAll(_ db: Database) throws -> [PlayerInfo] { - try all().fetchAll(db) - } -} -``` - -And when your app needs to fetch player infos, it now reads: - -```swift -// Fetch player infos -let playerInfos = try dbQueue.read { db in - try PlayerInfo.fetchAll(db) -} -``` - - -> :bulb: In this chapter, we have learned: -> -> - how to define a `FetchableRecord` record that consumes rows fetched from a joined query. -> - how to use [SQL Interpolation] and `numberOfSelectedColumns` in order to deal with nested record types that define custom selection. -> - how to use `splittingRowAdapters` in order to streamline the definition of row slices. -> - how to gather all relevant methods and constants in a record type, fully responsible of its relationship with the database. - - -### Splitting Rows, the Codable Way - -[Codable Records] build on top of the standard Decodable protocol in order to decode database rows. - -You can consume complex joined queries with Codable records as well. 
As a demonstration, we'll rewrite the [above](#splitting-rows-the-record-way) sample code: - -```swift -struct Player: Decodable, FetchableRecord, TableRecord { - var id: Int64 - var name: String -} -struct Team: Decodable, FetchableRecord, TableRecord { - var id: Int64 - var name: String - var color: Color -} -struct PlayerInfo: Decodable, FetchableRecord { - var player: Player - var team: Team? - var maxScore: Int -} - -extension PlayerInfo { - /// The request for all player infos - static func all() -> some FetchRequest { - let request: SQLRequest = """ - SELECT - \(columnsOf: Player.self), - \(columnsOf: Team.self), - MAX(round.score) AS maxScore - FROM player - LEFT JOIN team ON ... - LEFT JOIN round ON ... - GROUP BY ... - """ - return request.adapted { db in - let adapters = try splittingRowAdapters(columnCounts: [ - Player.numberOfSelectedColumns(db), - Team.numberOfSelectedColumns(db)]) - return ScopeAdapter([ - CodingKeys.player.stringValue: adapters[0], - CodingKeys.team.stringValue: adapters[1]]) - } - } - - /// Fetches all player infos - static func fetchAll(_ db: Database) throws -> [PlayerInfo] { - try all().fetchAll(db) - } -} - -// Fetch player infos -let playerInfos = try dbQueue.read { db in - try PlayerInfo.fetchAll(db) -} -``` - -> :bulb: In this chapter, we have learned how to use the `Decodable` protocol and its associated `CodingKeys` enum in order to dry up our code. +- The [`adapted(_:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/fetchrequest/adapted(_:)) method eases the consumption of complex rows with row adapters. See [`RowAdapter`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter) and [`splittingRowAdapters(columnCounts:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/splittingrowadapters(columncounts:)) for a sample code that uses `adapted(_:)`. Encryption @@ -5538,7 +4967,7 @@ Make sure you remove any existing `pod 'GRDB.swift'` from your Podfile. `GRDB.sw ### Creating or Opening an Encrypted Database -**You create and open an encrypted database** by providing a passphrase to your [database connection](#database-connections): +**You create and open an encrypted database** by providing a passphrase to your [database connection]: ```swift var config = Configuration() @@ -5684,11 +5113,11 @@ For even better control over the lifetime of the passphrase in memory, use a Dat // RECOMMENDED: only load the passphrase when it is needed and reset its content immediately after use var config = Configuration() config.prepareDatabase { db in - let passphrase = try getPassphraseData() // Data + var passphraseData = try getPassphraseData() // Data defer { - passphrase.resetBytes(in: 0../dev/null; then - swiftlint --config "${SRCROOT}/Scripts/swiftlint.yml" + # Ignore swiftlint error, because GRBD has no dependency on any Swiftlint version. 
+ # See https://github.com/groue/GRDB.swift/issues/1327 + swiftlint --config "${SRCROOT}/Scripts/swiftlint.yml" || true else echo "warning: SwiftLint not installed, download from https://github.com/realm/SwiftLint" fi diff --git a/Scripts/swiftlint.yml b/Scripts/swiftlint.yml index dc8568d0c4..af49e16e21 100644 --- a/Scripts/swiftlint.yml +++ b/Scripts/swiftlint.yml @@ -6,13 +6,14 @@ disabled_rules: - cyclomatic_complexity - duplicate_enum_cases - file_length + - for_where - force_cast - force_try - - for_where - function_body_length - function_parameter_count - identifier_name - is_disjoint + - large_tuple - nesting - opening_brace - redundant_optional_initialization @@ -25,11 +26,10 @@ disabled_rules: - unused_setter_value opt_in_rules: - - anyobject_protocol - array_init - - capture_variable - closure_spacing - collection_alignment + - comma_inheritance - contains_over_filter_count - contains_over_filter_is_empty - contains_over_first_not_nil @@ -48,20 +48,30 @@ opt_in_rules: - last_where - legacy_multiple - legacy_random + - local_doc_comment - lower_acl_than_parent - modifier_order - no_extension_access_modifier + - optional_enum_case_matching - overridden_super_call - pattern_matching_keywords - prohibited_super_call - reduce_into + - redundant_self_in_closure - redundant_type_annotation + - shorthand_optional_binding - sorted_first_last - sorted_imports - toggle_bool + - unhandled_throwing_task + - vertical_parameter_alignment_on_call + - yoda_condition + +analyzer_rules: + - capture_variable + - typesafe_array_init - unused_declaration - unused_import - - yoda_condition vertical_whitespace: max_empty_lines: 2 diff --git a/Sources/SQLCipher/include/sqlite3.h b/Sources/SQLCipher/include/sqlite3.h index f8aa7b3456..0911bfc6a8 100644 --- a/Sources/SQLCipher/include/sqlite3.h +++ b/Sources/SQLCipher/include/sqlite3.h @@ -146,9 +146,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. */ -#define SQLITE_VERSION "3.39.4" -#define SQLITE_VERSION_NUMBER 3039004 -#define SQLITE_SOURCE_ID "2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26alt1" +#define SQLITE_VERSION "3.45.3" +#define SQLITE_VERSION_NUMBER 3045003 +#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1ealt1" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -420,6 +420,8 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. **
  • The application must not modify the SQL statement text passed into ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
  • The application must not dereference the arrays or string pointers +** passed as the 3rd and 4th callback parameters after it returns. ** */ SQLITE_API int sqlite3_exec( @@ -528,6 +530,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) +#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -563,6 +566,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) @@ -670,13 +674,17 @@ SQLITE_API int sqlite3_exec( ** ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. +** of an [sqlite3_io_methods] object. These values are ordered from +** lest restrictive to most restrictive. +** +** The argument to xLock() is always SHARED or higher. The argument to +** xUnlock is either SHARED or NONE. */ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 +#define SQLITE_LOCK_NONE 0 /* xUnlock() only */ +#define SQLITE_LOCK_SHARED 1 /* xLock() or xUnlock() */ +#define SQLITE_LOCK_RESERVED 2 /* xLock() only */ +#define SQLITE_LOCK_PENDING 3 /* xLock() only */ +#define SQLITE_LOCK_EXCLUSIVE 4 /* xLock() only */ /* ** CAPI3REF: Synchronization Type Flags @@ -754,7 +762,14 @@ struct sqlite3_file { **
  • [SQLITE_LOCK_PENDING], or **
  • [SQLITE_LOCK_EXCLUSIVE]. ** -** xLock() increases the lock. xUnlock() decreases the lock. +** xLock() upgrades the database file lock. In other words, xLock() moves the +** database file lock in the direction NONE toward EXCLUSIVE. The argument to +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** SQLITE_LOCK_NONE. If the database file lock is already at or above the +** requested lock, then the call to xLock() is a no-op. +** xUnlock() downgrades the database file lock to either SHARED or NONE. +** If the lock is already at or below the requested lock state, then the call +** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, ** PENDING, or EXCLUSIVE lock on the file. It returns true @@ -859,9 +874,8 @@ struct sqlite3_io_methods { ** opcode causes the xFileControl method to write the current state of ** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], ** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and is only available when the SQLITE_TEST -** compile-time option is used. +** into an integer that the pArg argument points to. +** This capability is only available if SQLite is compiled with [SQLITE_DEBUG]. **
  • [[SQLITE_FCNTL_SIZE_HINT]] ** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS @@ -1165,7 +1179,6 @@ struct sqlite3_io_methods { ** in wal mode after the client has finished copying pages from the wal ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. -** ** **
  • [[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect @@ -1178,10 +1191,16 @@ struct sqlite3_io_methods { ** the database is not a wal-mode db, or if there is no such connection in any ** other process. This opcode cannot be used to detect transactions opened ** by clients within the current process, only within other processes. -** ** **
  • [[SQLITE_FCNTL_CKSM_FILE]] -** Used by the cksmvfs VFS module only. +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +**
  • [[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -1224,6 +1243,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1253,6 +1273,26 @@ typedef struct sqlite3_mutex sqlite3_mutex; */ typedef struct sqlite3_api_routines sqlite3_api_routines; +/* +** CAPI3REF: File Name +** +** Type [sqlite3_filename] is used by SQLite to pass filenames to the +** xOpen method of a [VFS]. It may be cast to (const char*) and treated +** as a normal, nul-terminated, UTF-8 buffer containing the filename, but +** may also be passed to special APIs such as: +** +**
+** <ul>
+** <li> sqlite3_filename_database()
+** <li> sqlite3_filename_journal()
+** <li> sqlite3_filename_wal()
+** <li> sqlite3_uri_parameter()
+** <li> sqlite3_uri_boolean()
+** <li> sqlite3_uri_int64()
+** <li> sqlite3_uri_key()
+** </ul>
    +*/ +typedef const char *sqlite3_filename; + /* ** CAPI3REF: OS Interface Object ** @@ -1431,7 +1471,7 @@ struct sqlite3_vfs { sqlite3_vfs *pNext; /* Next registered VFS */ const char *zName; /* Name of this virtual file system */ void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int (*xOpen)(sqlite3_vfs*, sqlite3_filename zName, sqlite3_file*, int flags, int *pOutFlags); int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); @@ -1618,20 +1658,23 @@ SQLITE_API int sqlite3_os_end(void); ** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running. ** -** The sqlite3_config() interface -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** ** The first argument to sqlite3_config() is an integer ** [configuration option] that determines ** what property of SQLite is to be configured. Subsequent arguments ** vary depending on the [configuration option] ** in the first argument. ** +** For most configuration options, the sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. @@ -1739,6 +1782,23 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +**
+** <ul>
+** <li> SQLITE_CONFIG_LOG
+** <li> SQLITE_CONFIG_PCACHE_HDRSZ
+** </ul>
    +** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that @@ -2069,7 +2129,7 @@ struct sqlite3_mem_methods { ** is stored in each sorted record and the required column values loaded ** from the database as records are returned in sorted order. The default ** value for this option is to never use this optimization. Specifying a -** negative value for this option restores the default behaviour. +** negative value for this option restores the default behavior. ** This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. ** @@ -2083,30 +2143,46 @@ struct sqlite3_mem_methods { ** configuration setting is never used, then the default maximum is determined ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that ** compile-time option is not set, then the default maximum is 1073741824. +** +** [[SQLITE_CONFIG_ROWID_IN_VIEW]] +**
    SQLITE_CONFIG_ROWID_IN_VIEW +**
    The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability +** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is +** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability +** defaults to on. This configuration option queries the current setting or +** changes the setting to off or on. The argument is a pointer to an integer. +** If that integer initially holds a value of 1, then the ability for VIEWs to +** have ROWIDs is activated. If the integer initially holds zero, then the +** ability is deactivated. Any other initial value for the integer leaves the +** setting unchanged. After changes, if any, the integer is written with +** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite +** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and +** recommended case) then the integer is always filled with zero, regardless +** if its initial value. ** */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. 
*/ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ @@ -2114,6 +2190,7 @@ struct sqlite3_mem_methods { #define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ #define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ #define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ +#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */ /* ** CAPI3REF: Database Connection Configuration Options @@ -2147,7 +2224,7 @@ struct sqlite3_mem_methods { ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^
    @@ -2244,7 +2321,7 @@ struct sqlite3_mem_methods { ** database handle, SQLite checks if this will mean that there are now no ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation +** override this behavior. The first parameter passed to this operation ** is an integer - positive to disable checkpoints-on-close, or zero (the ** default) to enable them, and negative to leave the setting unchanged. ** The second parameter is a pointer to an integer @@ -2297,8 +2374,12 @@ struct sqlite3_mem_methods { **
  • sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** ** Because resetting a database is destructive and irreversible, the -** process requires the use of this obscure API and multiple steps to help -** ensure that it does not happen by accident. +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. ** ** [[SQLITE_DBCONFIG_DEFENSIVE]]
    SQLITE_DBCONFIG_DEFENSIVE
    **
    The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the @@ -2309,6 +2390,7 @@ struct sqlite3_mem_methods { **
      **
    • The [PRAGMA writable_schema=ON] statement. **
    • The [PRAGMA journal_mode=OFF] statement. +**
    • The [PRAGMA schema_version=N] statement. **
    • Writes to the [sqlite_dbpage] virtual table. **
    • Direct writes to [shadow tables]. **
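The defensive flag described above is toggled through the usual two-argument sqlite3_db_config() convention. A minimal editor's sketch (not upstream documentation) that enables it on a freshly opened connection and verifies the new state:

    #include <sqlite3.h>

    static int enableDefensive(sqlite3 *db){
      int after = -1;   /* receives the setting in effect after the call */
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 1, &after);
      return rc==SQLITE_OK && after==1;
    }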
    @@ -2336,7 +2418,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_DQS_DML]] -**
    SQLITE_DBCONFIG_DQS_DML +**
    SQLITE_DBCONFIG_DQS_DML
    **
    The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The @@ -2345,7 +2427,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_DQS_DDL]] -**
    SQLITE_DBCONFIG_DQS_DDL +**
    SQLITE_DBCONFIG_DQS_DDL
    **
    The SQLITE_DBCONFIG_DQS option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The @@ -2354,7 +2436,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -**
    SQLITE_DBCONFIG_TRUSTED_SCHEMA +**
    SQLITE_DBCONFIG_TRUSTED_SCHEMA
    **
    The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite @@ -2374,7 +2456,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -**
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT +**
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
    **
    The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte @@ -2383,7 +2465,7 @@ struct sqlite3_mem_methods { ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version @@ -2392,8 +2474,40 @@ struct sqlite3_mem_methods { ** the [VACUUM] command will fail with an obscure error when attempting to ** process a table with generated columns and a descending index. This is ** not considered a bug since SQLite versions 3.3.0 and earlier do not support -** either generated columns or decending indexes. +** either generated columns or descending indexes. +**
    +** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +**
    SQLITE_DBCONFIG_STMT_SCANSTATUS
    +**
The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +**
    +** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +**
    SQLITE_DBCONFIG_REVERSE_SCANORDER
    +**
The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on whether the reverse scan order flag is set after processing the +** first argument. **
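To make the two-argument calling convention concrete, here is a minimal editor's sketch (not part of the upstream patch) that enables reverse scan order and reads the resulting state back; SQLITE_DBCONFIG_STMT_SCANSTATUS above follows the identical (int, int*) pattern:

    #include <sqlite3.h>

    static int enableReverseScan(sqlite3 *db){
      int isSet = 0;   /* receives the flag after the first argument is applied */
      int rc = sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &isSet);
      return rc==SQLITE_OK && isSet==1;
    }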
    +** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -2414,7 +2528,9 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -2636,8 +2752,13 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*); ** ^A call to sqlite3_interrupt(D) that occurs when there are no running ** SQL statements is a no-op and has no effect on SQL statements ** that are started after the sqlite3_interrupt() call returns. +** +** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether +** or not an interrupt is currently in effect for [database connection] D. +** It returns 1 if an interrupt is currently in effect, or 0 otherwise. */ SQLITE_API void sqlite3_interrupt(sqlite3*); +SQLITE_API int sqlite3_is_interrupted(sqlite3*); /* ** CAPI3REF: Determine If An SQL Statement Is Complete @@ -3255,8 +3376,8 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, **
    ^An SQLITE_TRACE_PROFILE callback provides approximately the same ** information as is provided by the [sqlite3_profile()] callback. ** ^The P argument is a pointer to the [prepared statement] and the -** X argument points to a 64-bit integer which is the estimated of -** the number of nanosecond that the prepared statement took to run. +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. ** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. ** ** [[SQLITE_TRACE_ROW]]
    SQLITE_TRACE_ROW
    @@ -3288,8 +3409,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, ** M argument should be the bitwise OR-ed combination of ** zero or more [SQLITE_TRACE] constants. ** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. ** ** ^The X callback is invoked whenever any of the events identified by ** mask M occur. ^The integer return value from the callback is currently @@ -3319,7 +3442,7 @@ SQLITE_API int sqlite3_trace_v2( ** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** [sqlite3_step()] and [sqlite3_prepare()] and similar for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. ** @@ -3344,6 +3467,13 @@ SQLITE_API int sqlite3_trace_v2( ** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. */ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); @@ -3380,13 +3510,18 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** **
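The progress-handler paragraphs above translate into very little code. A hedged editor's sketch (names are illustrative) that lets a UI cancel a long-running statement:

    #include <sqlite3.h>

    static volatile int gCancelRequested = 0;  /* set from the UI thread */

    static int progressCb(void *pUnused){
      (void)pUnused;
      /* A non-zero return interrupts the statement; sqlite3_step()
      ** then returns SQLITE_INTERRUPT. */
      return gCancelRequested;
    }

    static void installProgressHandler(sqlite3 *db){
      /* Invoke progressCb roughly every 1000 virtual-machine opcodes. */
      sqlite3_progress_handler(db, 1000, progressCb, 0);
    }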
    ** ^(
    [SQLITE_OPEN_READONLY]
    -**
    The database is opened in read-only mode. If the database does not -** already exist, an error is returned.
    )^ +**
    The database is opened in read-only mode. If the database does +** not already exist, an error is returned.
    )^ ** ** ^(
    [SQLITE_OPEN_READWRITE]
    -**
    The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.
    )^ +**
    The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.
    )^ ** ** ^(
    [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
    **
    The database is opened for reading and writing, and is created if @@ -3424,6 +3559,9 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); **
    The database is opened [shared cache] enabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ +** The [use of shared cache mode is discouraged] and hence shared cache +** capabilities may be omitted from many builds of SQLite. In such cases, +** this option is a no-op. ** ** ^(
    [SQLITE_OPEN_PRIVATECACHE]
    **
    The database is opened [shared cache] disabled, overriding @@ -3439,7 +3577,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** to return an extended result code.
    ** ** [[OPEN_NOFOLLOW]] ^(
    [SQLITE_OPEN_NOFOLLOW]
    -**
    The database filename is not allowed to be a symbolic link
    +**
    The database filename is not allowed to contain a symbolic link
    **
    )^ ** ** If the 3rd parameter to sqlite3_open_v2() is not one of the @@ -3643,7 +3781,7 @@ SQLITE_API int sqlite3_open_v2( ** as F) must be one of: **
      **
    • A database filename pointer created by the SQLite core and -** passed into the xOpen() method of a VFS implemention, or +** passed into the xOpen() method of a VFS implementation, or **
    • A filename obtained from [sqlite3_db_filename()], or **
    • A new filename constructed using [sqlite3_create_filename()]. **
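Tying the open flags and the filename list above together, a minimal editor's sketch (the database name and query parameter "foo" are hypothetical): the F value obtained from sqlite3_db_filename() is one of the three acceptable sources listed above.

    #include <stdio.h>
    #include <sqlite3.h>

    static int openAndReadParam(void){
      sqlite3 *db = 0;
      int rc = sqlite3_open_v2("file:app.db?foo=bar", &db,
                   SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_URI,
                   0 /* default VFS */);
      if( rc==SQLITE_OK ){
        const char *zFoo =
            sqlite3_uri_parameter(sqlite3_db_filename(db, "main"), "foo");
        printf("foo=%s\n", zFoo ? zFoo : "(absent)");
      }
      sqlite3_close(db);
      return rc;
    }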
@@ -3698,10 +3836,10 @@ SQLITE_API int sqlite3_open_v2( ** ** See the [URI filename] documentation for additional information. */ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); -SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N); +SQLITE_API const char *sqlite3_uri_parameter(sqlite3_filename z, const char *zParam); +SQLITE_API int sqlite3_uri_boolean(sqlite3_filename z, const char *zParam, int bDefault); +SQLITE_API sqlite3_int64 sqlite3_uri_int64(sqlite3_filename, const char*, sqlite3_int64); +SQLITE_API const char *sqlite3_uri_key(sqlite3_filename z, int N); /* ** CAPI3REF: Translate filenames @@ -3730,9 +3868,9 @@ SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N); ** return value from [sqlite3_db_filename()], then the result is ** undefined and is likely a memory access violation. */ -SQLITE_API const char *sqlite3_filename_database(const char*); -SQLITE_API const char *sqlite3_filename_journal(const char*); -SQLITE_API const char *sqlite3_filename_wal(const char*); +SQLITE_API const char *sqlite3_filename_database(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_journal(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_wal(sqlite3_filename); /* ** CAPI3REF: Database File Corresponding To A Journal @@ -3756,7 +3894,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); /* ** CAPI3REF: Create and Destroy VFS Filenames ** -** These interfces are provided for use by [VFS shim] implementations and +** These interfaces are provided for use by [VFS shim] implementations and ** are not useful outside of that context. ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of @@ -3798,14 +3936,14 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** then the corresponding [sqlite3_module.xClose() method should also be ** invoked prior to calling sqlite3_free_filename(Y). */ -SQLITE_API char *sqlite3_create_filename( +SQLITE_API sqlite3_filename sqlite3_create_filename( const char *zDatabase, const char *zJournal, const char *zWal, int nParam, const char **azParam ); -SQLITE_API void sqlite3_free_filename(char*); +SQLITE_API void sqlite3_free_filename(sqlite3_filename); /* ** CAPI3REF: Error Codes And Messages @@ -3835,14 +3973,17 @@ SQLITE_API void sqlite3_free_filename(char*); ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not a +** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -4303,6 +4444,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); */ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt); +/* +** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN +** setting for [prepared statement] S. If E is zero, then S becomes +** a normal prepared statement. If E is 1, then S behaves as if +** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if +** its SQL text began with "[EXPLAIN QUERY PLAN]". +** +** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared. +** SQLite tries to avoid a reprepare, but a reprepare might be necessary +** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode. +** +** Because of the potential need to reprepare, a call to +** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be +** reprepared because it was created using [sqlite3_prepare()] instead of +** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and +** hence has no saved SQL text with which to reprepare. +** +** Changing the explain setting for a prepared statement does not change +** the original SQL text for the statement. Hence, if the SQL text originally +** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0) +** is called to convert the statement into an ordinary statement, the EXPLAIN +** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S) +** output, even though the statement now acts like a normal SQL statement. +** +** This routine returns SQLITE_OK if the explain mode is successfully +** changed, or an error code if the explain mode could not be changed. +** The explain mode cannot be changed while a statement is active. +** Hence, it is good practice to call [sqlite3_reset(S)] +** immediately prior to calling sqlite3_stmt_explain(S,E). +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode); + /* ** CAPI3REF: Determine If A Prepared Statement Has Been Reset ** METHOD: sqlite3_stmt @@ -4466,7 +4642,7 @@ typedef struct sqlite3_context sqlite3_context; ** with it may be passed. ^It is called to dispose of the BLOB or string even ** if the call to the bind API fails, except the destructor is not called if ** the third parameter is a NULL pointer or the fourth parameter is negative. -** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that ** the application remains responsible for disposing of the object. ^In this ** case, the object and the provided pointer to it must remain valid until ** either the prepared statement is finalized or the same SQL parameter is @@ -5145,20 +5321,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); ** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. 
+** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. ^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + /* ** CAPI3REF: Create Or Redefine SQL Functions ** KEYWORDS: {function creation routines} @@ -5364,10 +5553,21 @@ SQLITE_API int sqlite3_create_window_function( ** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in ** schema structures such as [CHECK constraints], [DEFAULT clauses], ** [expression indexes], [partial indexes], or [generated columns]. -** The SQLITE_DIRECTONLY flags is a security feature which is recommended -** for all [application-defined SQL functions], and especially for functions -** that have side-effects or that could potentially leak sensitive -** information. +**

    +** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +**

    +** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. ** ** ** [[SQLITE_INNOCUOUS]]

    SQLITE_INNOCUOUS
    @@ -5394,13 +5594,27 @@ SQLITE_API int sqlite3_create_window_function( **
    ** ** [[SQLITE_SUBTYPE]]
    SQLITE_SUBTYPE
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invoke [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
    SQLITE_RESULT_SUBTYPE
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as a term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. **
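In practice, the function flags discussed above are simply OR-ed into the text-encoding argument at registration time. An editor's sketch (the function name "tag" and the subtype value 42 are hypothetical):

    #include <sqlite3.h>

    static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
      (void)argc;
      sqlite3_result_value(ctx, argv[0]);
      sqlite3_result_subtype(ctx, 42);  /* needs SQLITE_RESULT_SUBTYPE below */
    }

    /* Register a deterministic function that sets a result subtype and
    ** may not be invoked from within the database schema. */
    static int registerTag(sqlite3 *db){
      return sqlite3_create_function_v2(db, "tag", 1,
          SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY|SQLITE_RESULT_SUBTYPE,
          0, tagFunc, 0, 0, 0);
    }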
** */ @@ -5408,6 +5622,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -5573,6 +5788,28 @@ SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); SQLITE_API int sqlite3_value_nochange(sqlite3_value*); SQLITE_API int sqlite3_value_frombind(sqlite3_value*); +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)], +** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or +** [sqlite3_value_bytes16(X)] might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). +** +** This routine is intended for use by applications that test and validate +** the SQLite implementation. This routine is inquiring about the opaque +** internal state of an [sqlite3_value] object. Ordinary applications should +** not need to know what the internal state of an sqlite3_value object is and +** hence should not need to use this interface. +*/ +SQLITE_API int sqlite3_value_encoding(sqlite3_value*); + /* ** CAPI3REF: Finding The Subtype Of SQL Values ** METHOD: sqlite3_value @@ -5582,6 +5819,12 @@ SQLITE_API int sqlite3_value_frombind(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. */ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -5625,7 +5868,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** ** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer ** when first called if N is less than or equal to zero or if a memory -** allocate error occurs. +** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is ** determined by the N parameter on first successful call. Changing the @@ -5680,48 +5923,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); ** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. +** associate auxiliary data with argument values.
If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. ** -** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data ** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument ** value to the application-defined function. ^N is zero for the left-most -** function argument. ^If there is no metadata +** function argument. ^If there is no auxiliary data ** associated with the function argument, the sqlite3_get_auxdata(C,N) interface ** returns a NULL pointer. ** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including:
      +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including:
        **
      • ^(when the corresponding function parameter changes)^, or **
      • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the ** SQL statement)^, or **
      • ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or **
      • ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^
      +** allocation error occurs.)^ +**
    • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^
    ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. -** -** ^(In practice, metadata is preserved between function calls for +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. +** +** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ ** @@ -5731,10 +5982,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); ** ** These routines must be called from the same thread in which ** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. */ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. +** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +**
      +**
    • An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +**
    • A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +**
    • The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +**
+** +** SQLite does not do anything with client data other than invoke +** destructors on the client data at the appropriate time. The intended +** use for client data is to provide a mechanism for wrapper libraries +** to store additional information about an SQLite database connection. +** +** There is no limit (other than available memory) on the number of different +** client data pointers (with different names) that can be attached to a +** single database connection. However, the implementation is optimized +** for the case of having only one or two different client data names. +** Applications and wrapper libraries are discouraged from using more than +** one client data name each. +** +** There is no way to enumerate the client data pointers +** associated with a database connection. The N parameter can be thought +** of as a secret key such that only code that knows the secret key is able +** to access the associated data. +** +** Security Warning: These interfaces should not be exposed in scripting +** languages or in other circumstances where it might be possible for +** an attacker to invoke them. Any agent that can invoke these interfaces +** can probably also take control of the process. +** +** Database connection client data is only available for SQLite +** version 3.44.0 ([dateof:3.44.0]) and later. +** +** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()]. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*); +SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior @@ -5830,9 +6138,10 @@ typedef void (*sqlite3_destructor_type)(void*); ** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]. ** ^SQLite takes the text result from the application from ** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. +** ^If the 3rd parameter to any of the sqlite3_result_text* interfaces +** other than sqlite3_result_text64() is negative, then SQLite computes +** the string length itself by searching the 2nd parameter for the first +** zero character. ** ^If the 3rd parameter to the sqlite3_result_text* interfaces ** is non-negative, then as many bytes (not characters) of the text ** pointed to by the 2nd parameter are taken as the application-defined @@ -5935,6 +6244,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases the sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default.
*/ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -6166,6 +6489,13 @@ SQLITE_API void sqlite3_activate_cerod( ** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep() the results vary by +** VFS and operating system. Some systems treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int); @@ -6388,7 +6718,7 @@ SQLITE_API const char *sqlite3_db_name(sqlite3 *db, int N); **
  • [sqlite3_filename_wal()] ** */ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); +SQLITE_API sqlite3_filename sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Determine if a database is read-only @@ -6419,7 +6749,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); /* -** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** CAPI3REF: Allowed return values from sqlite3_txn_state() ** KEYWORDS: {transaction state} ** ** These constants define the current transaction state of a database file. @@ -6525,7 +6855,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** function C that is invoked prior to each autovacuum of the database ** file. ^The callback is passed a copy of the generic data pointer (P), ** the schema-name of the attached database that is being autovacuumed, -** the the size of the database file in pages, the number of free pages, +** the size of the database file in pages, the number of free pages, ** and the number of bytes per page, respectively. The callback should ** return the number of free pages that should be removed by the ** autovacuum. ^If the callback returns zero, then no autovacuum happens. @@ -6551,7 +6881,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** ^Each call to the sqlite3_autovacuum_pages() interface overrides all ** previous invocations for that database connection. ^If the callback ** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, -** then the autovacuum steps callback is cancelled. The return value +** then the autovacuum steps callback is canceled. The return value ** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might ** be some other error code if something goes wrong. The current ** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other @@ -6646,6 +6976,11 @@ SQLITE_API void *sqlite3_update_hook( ** to the same database. Sharing is enabled if the argument is true ** and disabled if the argument is false.)^ ** +** This interface is omitted if SQLite is compiled with +** [-DSQLITE_OMIT_SHARED_CACHE]. The [-DSQLITE_OMIT_SHARED_CACHE] +** compile-time option is recommended because the +** [use of shared cache mode is discouraged]. +** ** ^Cache sharing is enabled and disabled for an entire process. ** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]). ** In prior versions of SQLite, @@ -6744,7 +7079,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** ^The soft heap limit may not be greater than the hard heap limit. ** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit(N) ** is invoked with a value of N that is greater than the hard heap limit, -** the the soft heap limit is set to the value of the hard heap limit. +** the soft heap limit is set to the value of the hard heap limit. ** ^The soft heap limit is automatically enabled whenever the hard heap ** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and ** the soft heap limit is outside the range of 1..N, then the soft heap @@ -7005,15 +7340,6 @@ SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void)); */ SQLITE_API void sqlite3_reset_auto_extension(void); -/* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. 
-** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - /* ** Structures used by the virtual table interface */ @@ -7074,6 +7400,10 @@ struct sqlite3_module { /* The methods above are in versions 1 and 2 of the sqlite_module object. ** Those below are for version 3 and greater. */ int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. */ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); }; /* @@ -7132,10 +7462,10 @@ struct sqlite3_module { ** when the omit flag is true there is no guarantee that the constraint will ** not be checked again using byte code.)^ ** -** ^The idxNum and idxPtr values are recorded and passed into the +** ^The idxNum and idxStr values are recorded and passed into the ** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate @@ -7255,7 +7585,7 @@ struct sqlite3_index_info { ** the [sqlite3_vtab_collation()] interface. For most real-world virtual ** tables, the collating sequence of constraints does not matter (for example ** because the constraints are numeric) and so the sqlite3_vtab_collation() -** interface is no commonly needed. +** interface is not commonly needed. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4 @@ -7414,16 +7744,6 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); */ SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); -/* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - /* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} @@ -7571,7 +7891,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); ** code is returned and the transaction rolled back. ** ** Calling this function with an argument that is not a NULL pointer or an -** open blob handle results in undefined behaviour. ^Calling this routine +** open blob handle results in undefined behavior. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the @@ -7798,18 +8118,20 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. 
In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */ @@ -8051,6 +8373,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 @@ -8058,6 +8381,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -8079,7 +8403,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -9039,7 +9364,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** if the application incorrectly accesses the destination [database connection] ** and so no error code is reported, but the operations may malfunction ** nevertheless. Use of the destination database connection while a -** backup is in progress might also also cause a mutex deadlock. +** backup is in progress might also cause a mutex deadlock. ** ** If running in [shared cache mode], the application must ** guarantee that the shared cache used by the destination database @@ -9467,7 +9792,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( */ #define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ #define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ -#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for for readers */ +#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */ #define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */ /* @@ -9535,7 +9860,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_DIRECTONLY]]
    SQLITE_VTAB_DIRECTONLY
    **
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** prohibit that virtual table from being used from within triggers and ** views. **
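For orientation, a hedged editor's sketch of where such a call sits inside a virtual table implementation (the module schema and names are hypothetical):

    #include <string.h>
    #include <sqlite3.h>

    /* An xConnect method that declares its schema and then marks the
    ** virtual table as direct-only. */
    static int demoConnect(sqlite3 *db, void *pAux, int argc,
                           const char *const *argv,
                           sqlite3_vtab **ppVtab, char **pzErr){
      int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a, b)");
      if( rc==SQLITE_OK ) rc = sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
      if( rc==SQLITE_OK ){
        *ppVtab = (sqlite3_vtab*)sqlite3_malloc(sizeof(sqlite3_vtab));
        if( *ppVtab==0 ) return SQLITE_NOMEM;
        memset(*ppVtab, 0, sizeof(sqlite3_vtab));
      }
      (void)pAux; (void)argc; (void)argv; (void)pzErr;
      return rc;
    }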
    @@ -9543,18 +9868,28 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
    SQLITE_VTAB_INNOCUOUS
    **
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. **
    +** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]
    SQLITE_VTAB_USES_ALL_SCHEMAS
    +**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMAS) from within the +** [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +**
    ** */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy @@ -9627,7 +9962,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); **
  • Otherwise, "BINARY" is returned. ** */ -SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); /* ** CAPI3REF: Determine if a virtual table query is DISTINCT @@ -9715,7 +10050,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*); ** communicated to the xBestIndex method as a ** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use ** this constraint, it must set the corresponding -** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under ** the usual mode of handling IN operators, SQLite generates [bytecode] ** that invokes the [xFilter|xFilter() method] once for each value ** on the right-hand side of the IN operator.)^ Thus the virtual table @@ -9784,21 +10119,20 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); ** is undefined and probably harmful. ** ** The X parameter in a call to sqlite3_vtab_in_first(X,P) or -** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint ** processing use the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint -** processing, then these routines return [SQLITE_MISUSE])^ or perhaps -** exhibit some other undefined or harmful behavior. +** processing, then these routines return [SQLITE_ERROR].)^ ** ** ^(Use these routines to access all values on the right-hand side ** of the IN constraint using code like the following: ** **

     **    for(rc=sqlite3_vtab_in_first(pList, &pVal);
    -**        rc==SQLITE_OK && pVal
    +**        rc==SQLITE_OK && pVal;
     **        rc=sqlite3_vtab_in_next(pList, &pVal)
     **    ){
     **      // do something with pVal
    @@ -9896,6 +10230,10 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
     ** managed by the prepared statement S and will be automatically freed when
     ** S is finalized.
     **
    +** Not all values are available for all query elements. When a value is
    +** not available, the output variable is set to -1 if the value is numeric,
    +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME).
    +**
     ** 
    ** [[SQLITE_SCANSTAT_NLOOP]]
    SQLITE_SCANSTAT_NLOOP
    **
    ^The [sqlite3_int64] variable pointed to by the V parameter will be @@ -9923,12 +10261,24 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] ** description for the X-th loop. ** -** [[SQLITE_SCANSTAT_SELECTID]]
    SQLITE_SCANSTAT_SELECT
    +** [[SQLITE_SCANSTAT_SELECTID]]
    SQLITE_SCANSTAT_SELECTID
    **
    ^The "int" variable pointed to by the V parameter will be set to the -** "select-id" for the X-th loop. The select-id identifies which query or -** subquery the loop is part of. The main query has a select-id of zero. -** The select-id is the same value as is output in the first column -** of an [EXPLAIN QUERY PLAN] query. +** id for the X-th query plan element. The id value is unique within the +** statement. The select-id is the same value as is output in the first +** column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_PARENTID]]
    SQLITE_SCANSTAT_PARENTID
    +**
    The "int" variable pointed to by the V parameter will be set to the +** the id of the parent of the current query element, if applicable, or +** to zero if the query element has no parent. This is the same value as +** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_NCYCLE]]
    SQLITE_SCANSTAT_NCYCLE
    +**
    The sqlite3_int64 output value is set to the number of cycles, +** according to the processor time-stamp counter, that elapsed while the +** query element was being processed. This value is not available for +** all query elements - if it is unavailable the output variable is +** set to -1. **
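**
** As an illustration (an editor's sketch, not upstream documentation):
** assuming a build with SQLITE_ENABLE_STMT_SCANSTATUS, the NCYCLE counter
** for every element of a query plan can be read with:
**
**    sqlite3_int64 nCycle;
**    int ii;
**    for(ii=0; sqlite3_stmt_scanstatus_v2(pStmt, ii, SQLITE_SCANSTAT_NCYCLE,
**                  SQLITE_SCANSTAT_COMPLEX, (void*)&nCycle)==0; ii++){
**      /* nCycle==-1 means the count is unavailable for element ii */
**    }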
*/ #define SQLITE_SCANSTAT_NLOOP 0 @@ -9937,12 +10287,14 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** #define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** -** This interface returns information about the predicted and measured +** These interfaces return information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found. @@ -9953,19 +10305,25 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior -** of this interface is undefined. -** ^The requested measurement is written into a variable pointed to by -** the "pOut" parameter. -** Parameter "idx" identifies the specific loop to retrieve statistics for. -** Loops are numbered starting from zero. ^If idx is out of range - less than -** zero or greater than or equal to the total number of loops used to implement -** the statement - a non-zero value is returned and the variable that pOut -** points to is unchanged. -** -** ^Statistics might not be available for all loops in all statements. ^In cases -** where there exist loops with no available statistics, this function behaves -** as if the loop did not exist - it returns non-zero and leave the variable -** that pOut points to unchanged. +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. +** +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. +** +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be used +** to query for statistics regarding the entire query. ^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */ @@ -9975,6 +10333,19 @@ SQLITE_API int sqlite3_stmt_scanstatus( int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired.
SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 /* ** CAPI3REF: Zero Scan-Status Counters @@ -10065,6 +10436,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** function is not defined for operations on WITHOUT ROWID tables, or for ** DELETE operations on rowid tables. ** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces ** provide additional information about a preupdate event. These routines @@ -10104,7 +10479,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** When the [sqlite3_blob_write()] API is used to update a blob column, ** the pre-update hook is invoked with SQLITE_DELETE. This is because the ** in this case the new values are not available. In this case, when a -** callback made with op==SQLITE_DELETE is actuall a write using the +** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the ** pre-update hook is being invoked for some other reason, including a @@ -10365,6 +10740,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy ** of the database exists. ** +** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set, +** the returned buffer content will remain accessible and unchanged +** until either the next write operation on the connection or when +** the connection is closed, and applications must not modify the +** buffer. If the bit had been clear, the returned buffer will not +** be accessed by SQLite after the call. +** ** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory ** allocation error occurs. @@ -10413,6 +10795,9 @@ SQLITE_API unsigned char *sqlite3_serialize( ** SQLite will try to increase the buffer size using sqlite3_realloc64() ** if writes on the database cause it to grow larger than M bytes. ** +** Applications must not modify the buffer P or invalidate it before +** the database connection D is closed. +** ** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the ** database is currently in a read transaction or is involved in a backup ** operation. @@ -10421,6 +10806,13 @@ SQLITE_API unsigned char *sqlite3_serialize( ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning. 
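The serialize/deserialize pair documented above composes naturally. A minimal editor's sketch (error handling abbreviated) that clones the "main" schema of one connection into a fresh in-memory connection:

    #include <sqlite3.h>

    static int cloneToMemory(sqlite3 *db, sqlite3 **pClone){
      sqlite3_int64 sz = 0;
      unsigned char *p = sqlite3_serialize(db, "main", &sz, 0);
      int rc;
      if( p==0 ) return SQLITE_NOMEM;
      rc = sqlite3_open(":memory:", pClone);
      if( rc==SQLITE_OK ){
        /* FREEONCLOSE transfers ownership of p to the new connection,
        ** even if the deserialization itself fails. */
        rc = sqlite3_deserialize(*pClone, "main", p, sz, sz,
            SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
      }else{
        sqlite3_free(p);
      }
      return rc;
    }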
@@ -10470,6 +10862,19 @@ SQLITE_API int sqlite3_deserialize( # undef double #endif +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif +#endif + #ifdef __cplusplus } /* End of the 'extern "C"' block */ #endif @@ -10676,16 +11081,20 @@ SQLITE_API int sqlite3session_create( SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may be passed as the 2nd parameter to ** sqlite3session_object_config(). ** **
    SQLITE_SESSION_OBJCONFIG_SIZE
    @@ -10701,12 +11110,21 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); ** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. +** +**
    SQLITE_SESSION_OBJCONFIG_ROWID
    +** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* -*/ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -11467,6 +11885,18 @@ SQLITE_API int sqlite3changeset_concat( ); +/* +** CAPI3REF: Upgrade the Schema of a Changeset/Patchset +*/ +SQLITE_API int sqlite3changeset_upgrade( + sqlite3 *db, + const char *zDb, + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + + + /* ** CAPI3REF: Changegroup Handle ** @@ -11513,6 +11943,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup; */ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); +/* +** CAPI3REF: Add a Schema to a Changegroup +** METHOD: sqlite3_changegroup_schema +** +** This method may be used to optionally enforce the rule that the changesets +** added to the changegroup handle must match the schema of database zDb +** ("main", "temp", or the name of an attached database). If +** sqlite3changegroup_add() is called to add a changeset that is not compatible +** with the configured schema, SQLITE_SCHEMA is returned and the changegroup +** object is left in an undefined state. +** +** A changeset schema is considered compatible with the database schema in +** the same way as for sqlite3changeset_apply(). Specifically, for each +** table in the changeset, there exists a database table with: +** +**
      +**
    • The name identified by the changeset, and +**
    • at least as many columns as recorded in the changeset, and +**
    • the primary key columns in the same position as recorded in +** the changeset. +**
+** +** The output of the changegroup object always has the same schema as the +** database nominated using this function. In cases where changesets passed +** to sqlite3changegroup_add() have fewer columns than the corresponding table +** in the database schema, these are filled in using the default column +** values from the database schema. This makes it possible to combine +** changesets that have different numbers of columns for a single table +** within a changegroup, provided that they are otherwise compatible. +*/ +SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb); + /* ** CAPI3REF: Add A Changeset To A Changegroup ** METHOD: sqlite3_changegroup @@ -11581,13 +12043,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); ** If the new changeset contains changes to a table that is already present ** in the changegroup, then the number of columns and the position of the ** primary key columns for the table must be consistent. If this is not the -** case, this function fails with SQLITE_SCHEMA. If the input changeset -** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is -** returned. Or, if an out-of-memory condition occurs during processing, this -** function returns SQLITE_NOMEM. In all cases, if an error occurs the state -** of the final contents of the changegroup is undefined. +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. ** -** If no error occurs, SQLITE_OK is returned. +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); @@ -11839,9 +12306,30 @@ SQLITE_API int sqlite3changeset_apply_v2( ** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +**
    SQLITE_CHANGESETAPPLY_IGNORENOOP
    +** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +**
      +**
    • a delete change if the row being deleted cannot be found, +**
    • an update change if the modified fields are already set to +** their new values in the conflicting row, or +**
    • an insert change if all fields of the conflicting row match +** the row being inserted. +**
    +** +**
    SQLITE_CHANGESETAPPLY_FKNOACTION
+** If this flag is set, then all foreign key constraints in the target +** database behave as if they were declared with "ON UPDATE NO ACTION ON +** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL +** or SET DEFAULT. */ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 +#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008 /* ** CAPI3REF: Constants Passed To The Conflict Handler @@ -12407,8 +12895,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -12418,8 +12909,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iPhrase is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -12435,12 +12928,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -12466,6 +12960,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
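Stepping back to the two SQLITE_CHANGESETAPPLY_* flags defined earlier in this hunk, the following is a minimal sketch of how they might be passed to sqlite3changeset_apply_v2(). A session-enabled build is assumed; the omit-everything conflict handler and the helper name apply_quietly are illustrative only.

#include <sqlite3.h>

/* Simplistic conflict handler: skip any change that conflicts. */
static int xConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
  (void)pCtx; (void)eConflict; (void)pIter;
  return SQLITE_CHANGESET_OMIT;
}

/* Apply a changeset with no conflict callbacks for no-op changes and with
** all foreign keys treated as ON UPDATE/DELETE NO ACTION. */
static int apply_quietly(sqlite3 *db, int nChangeset, void *pChangeset){
  return sqlite3changeset_apply_v2(db, nChangeset, pChangeset,
      0,             /* xFilter: NULL means apply changes for all tables */
      xConflict, 0,  /* conflict handler and its context pointer */
      0, 0,          /* no rebase information requested */
      SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION);
}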
@@ -12580,6 +13078,39 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken +** are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { int iVersion; /* Currently always set to 3 */ @@ -12617,6 +13148,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -12811,8 +13349,8 @@ struct Fts5ExtensionApi { ** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient.
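Because xQueryToken and xInstToken sit below the iVersion>=3 marker in Fts5ExtensionApi, an auxiliary function should check iVersion before calling them. A sketch, assuming the function is registered through fts5_api in the usual way; the name firstTokenFunc is illustrative:

#include <sqlite3.h>

/* FTS5 auxiliary function returning the first token of the first query
** phrase, or NULL when that information is unavailable. */
static void firstTokenFunc(
  const Fts5ExtensionApi *pApi,  /* Callbacks offered by the FTS5 core */
  Fts5Context *pFts,             /* First argument for pApi methods */
  sqlite3_context *pCtx,         /* For returning the SQL result */
  int nVal, sqlite3_value **apVal
){
  const char *zToken = 0;
  int nToken = 0;
  (void)nVal; (void)apVal;
  if( pApi->iVersion<3 || pApi->xPhraseCount(pFts)<1 ){
    sqlite3_result_null(pCtx);        /* xQueryToken not available */
  }else if( pApi->xQueryToken(pFts, 0, 0, &zToken, &nToken)==SQLITE_OK ){
    sqlite3_result_text(pCtx, zToken, nToken, SQLITE_TRANSIENT);
  }else{
    sqlite3_result_error_code(pCtx, SQLITE_RANGE);
  }
}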
*/ typedef struct Fts5Tokenizer Fts5Tokenizer; @@ -12860,7 +13398,7 @@ struct fts5_api { int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) ); @@ -12869,7 +13407,7 @@ struct fts5_api { int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer ); @@ -12877,7 +13415,7 @@ struct fts5_api { int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) ); diff --git a/Sources/SQLCipher/sqlite3.c b/Sources/SQLCipher/sqlite3.c index feb5c9e281..2ae58031bd 100644 --- a/Sources/SQLCipher/sqlite3.c +++ b/Sources/SQLCipher/sqlite3.c @@ -1,7 +1,7 @@ #include /****************************************************************************** ** This file is an amalgamation of many separate C source files from SQLite -** version 3.39.4. By combining all the individual C code files into this +** version 3.45.3. By combining all the individual C code files into this ** single large file, the entire code can be compiled as a single translation ** unit. This allows many compilers to do optimizations that would not be ** possible if the files were compiled separately. Performance improvements @@ -17,6 +17,46 @@ ** if you want a wrapper to interface SQLite with your choice of programming ** language. The code for the "sqlite3" command-line shell is also in a ** separate file. This file contains only code for the core SQLite library. +** +** The content in this amalgamation comes from Fossil check-in +** 8653b758870e6ef0c98d46b3ace27849054a with changes in files: +** +** .fossil-settings/empty-dirs +** .fossil-settings/ignore-glob +** LICENSE.md +** Makefile.in +** Makefile.msc +** README.md +** aclocal.m4 +** configure +** configure.ac +** ltmain.sh +** sqlite3.1 +** sqlite3.pc.in +** sqlite_cfg.h.in +** src/attach.c +** src/backup.c +** src/ctime.c +** src/func.c +** src/global.c +** src/main.c +** src/malloc.c +** src/pager.c +** src/pager.h +** src/pragma.c +** src/pragma.h +** src/shell.c.in +** src/sqlite.h.in +** src/sqliteInt.h +** src/tclsqlite.c +** src/test1.c +** src/test_config.c +** src/test_thread.c +** src/util.c +** src/vacuum.c +** src/wal.c +** tool/mkpragmatab.tcl +** tool/mksqlite3c.tcl */ #define SQLITE_CORE 1 #define SQLITE_AMALGAMATION 1 @@ -51,11 +91,11 @@ ** used on lines of code that actually ** implement parts of coverage testing. ** -** OPTIMIZATION-IF-TRUE - This branch is allowed to alway be false +** OPTIMIZATION-IF-TRUE - This branch is allowed to always be false ** and the correct answer is still obtained, ** though perhaps more slowly. ** -** OPTIMIZATION-IF-FALSE - This branch is allowed to alway be true +** OPTIMIZATION-IF-FALSE - This branch is allowed to always be true ** and the correct answer is still obtained, ** though perhaps more slowly. ** @@ -124,6 +164,10 @@ #define SQLITE_4_BYTE_ALIGNED_MALLOC #endif /* defined(_MSC_VER) && !defined(_WIN64) */ +#if !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 +#define HAVE_LOG2 0 +#endif /* !defined(HAVE_LOG2) && defined(_MSC_VER) && _MSC_VER<1800 */ + #endif /* SQLITE_MSVC_H */ /************** End of msvc.h ************************************************/ @@ -453,9 +497,9 @@ extern "C" { ** [sqlite3_libversion_number()], [sqlite3_sourceid()], ** [sqlite_version()] and [sqlite_source_id()]. 
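The version constants above are conventionally cross-checked at startup against the library actually linked, along the lines of the check suggested in the SQLite documentation:

#include <assert.h>
#include <string.h>
#include <sqlite3.h>

/* Fail fast if the sqlite3.h seen at compile time does not match the
** SQLite library linked into the process. */
static void check_sqlite_version(void){
  assert( sqlite3_libversion_number()==SQLITE_VERSION_NUMBER );
  assert( strcmp(sqlite3_libversion(), SQLITE_VERSION)==0 );
  assert( strcmp(sqlite3_sourceid(), SQLITE_SOURCE_ID)==0 );
}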
*/ -#define SQLITE_VERSION "3.39.4" -#define SQLITE_VERSION_NUMBER 3039004 -#define SQLITE_SOURCE_ID "2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26alt1" +#define SQLITE_VERSION "3.45.3" +#define SQLITE_VERSION_NUMBER 3045003 +#define SQLITE_SOURCE_ID "2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1ealt1" /* ** CAPI3REF: Run-Time Library Version Numbers @@ -727,6 +771,8 @@ typedef int (*sqlite3_callback)(void*,int,char**, char**); ** the 1st parameter to sqlite3_exec() while sqlite3_exec() is running. **
  • The application must not modify the SQL statement text passed into ** the 2nd parameter of sqlite3_exec() while sqlite3_exec() is running. +**
• The application must not dereference the arrays or string pointers +** passed as the 3rd and 4th callback parameters after it returns. ** */ SQLITE_API int sqlite3_exec( @@ -835,6 +881,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8)) #define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8)) #define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8)) +#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8)) #define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8)) #define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8)) #define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8)) @@ -870,6 +917,7 @@ SQLITE_API int sqlite3_exec( #define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8)) #define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8)) #define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8)) +#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8)) #define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8)) #define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8)) #define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8)) @@ -977,13 +1025,17 @@ SQLITE_API int sqlite3_exec( ** ** SQLite uses one of these integer values as the second ** argument to calls it makes to the xLock() and xUnlock() methods -** of an [sqlite3_io_methods] object. +** of an [sqlite3_io_methods] object. These values are ordered from +** least restrictive to most restrictive. +** +** The argument to xLock() is always SHARED or higher. The argument to +** xUnlock is either SHARED or NONE. */ -#define SQLITE_LOCK_NONE 0 -#define SQLITE_LOCK_SHARED 1 -#define SQLITE_LOCK_RESERVED 2 -#define SQLITE_LOCK_PENDING 3 -#define SQLITE_LOCK_EXCLUSIVE 4 +#define SQLITE_LOCK_NONE 0 /* xUnlock() only */ +#define SQLITE_LOCK_SHARED 1 /* xLock() or xUnlock() */ +#define SQLITE_LOCK_RESERVED 2 /* xLock() only */ +#define SQLITE_LOCK_PENDING 3 /* xLock() only */ +#define SQLITE_LOCK_EXCLUSIVE 4 /* xLock() only */ /* ** CAPI3REF: Synchronization Type Flags @@ -1061,7 +1113,14 @@ struct sqlite3_file { **
  • [SQLITE_LOCK_PENDING], or **
• [SQLITE_LOCK_EXCLUSIVE]. ** -** xLock() increases the lock. xUnlock() decreases the lock. +** xLock() upgrades the database file lock. In other words, xLock() moves the +** database file lock in the direction NONE toward EXCLUSIVE. The argument to +** xLock() is always one of SHARED, RESERVED, PENDING, or EXCLUSIVE, never +** SQLITE_LOCK_NONE. If the database file lock is already at or above the +** requested lock, then the call to xLock() is a no-op. +** xUnlock() downgrades the database file lock to either SHARED or NONE. +** If the lock is already at or below the requested lock state, then the call +** to xUnlock() is a no-op. ** The xCheckReservedLock() method checks whether any database connection, ** either in this process or in some other process, is holding a RESERVED, ** PENDING, or EXCLUSIVE lock on the file. It returns true @@ -1166,9 +1225,8 @@ struct sqlite3_io_methods { ** opcode causes the xFileControl method to write the current state of ** the lock (one of [SQLITE_LOCK_NONE], [SQLITE_LOCK_SHARED], ** [SQLITE_LOCK_RESERVED], [SQLITE_LOCK_PENDING], or [SQLITE_LOCK_EXCLUSIVE]) -** into an integer that the pArg argument points to. This capability -** is used during testing and is only available when the SQLITE_TEST -** compile-time option is used. +** into an integer that the pArg argument points to. +** This capability is only available if SQLite is compiled with [SQLITE_DEBUG]. ** **
  • [[SQLITE_FCNTL_SIZE_HINT]] ** The [SQLITE_FCNTL_SIZE_HINT] opcode is used by SQLite to give the VFS @@ -1472,7 +1530,6 @@ struct sqlite3_io_methods { ** in wal mode after the client has finished copying pages from the wal ** file to the database file, but before the *-shm file is updated to ** record the fact that the pages have been checkpointed. -** ** **
  • [[SQLITE_FCNTL_EXTERNAL_READER]] ** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect @@ -1485,10 +1542,16 @@ struct sqlite3_io_methods { ** the database is not a wal-mode db, or if there is no such connection in any ** other process. This opcode cannot be used to detect transactions opened ** by clients within the current process, only within other processes. -** ** **
  • [[SQLITE_FCNTL_CKSM_FILE]] -** Used by the cksmvfs VFS module only. +** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the +** [checksum VFS shim] only. +** +**
  • [[SQLITE_FCNTL_RESET_CACHE]] +** If there is currently no transaction open on the database, and the +** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control +** purges the contents of the in-memory page cache. If there is an open +** transaction, or if the db is a temp-db, this opcode is a no-op, not an error. ** */ #define SQLITE_FCNTL_LOCKSTATE 1 @@ -1531,6 +1594,7 @@ struct sqlite3_io_methods { #define SQLITE_FCNTL_CKPT_START 39 #define SQLITE_FCNTL_EXTERNAL_READER 40 #define SQLITE_FCNTL_CKSM_FILE 41 +#define SQLITE_FCNTL_RESET_CACHE 42 /* deprecated names */ #define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE @@ -1560,6 +1624,26 @@ typedef struct sqlite3_mutex sqlite3_mutex; */ typedef struct sqlite3_api_routines sqlite3_api_routines; +/* +** CAPI3REF: File Name +** +** Type [sqlite3_filename] is used by SQLite to pass filenames to the +** xOpen method of a [VFS]. It may be cast to (const char*) and treated +** as a normal, nul-terminated, UTF-8 buffer containing the filename, but +** may also be passed to special APIs such as: +** +**
      +**
    • sqlite3_filename_database() +**
    • sqlite3_filename_journal() +**
    • sqlite3_filename_wal() +**
    • sqlite3_uri_parameter() +**
    • sqlite3_uri_boolean() +**
    • sqlite3_uri_int64() +**
    • sqlite3_uri_key() +**
    +*/ +typedef const char *sqlite3_filename; + /* ** CAPI3REF: OS Interface Object ** @@ -1738,7 +1822,7 @@ struct sqlite3_vfs { sqlite3_vfs *pNext; /* Next registered VFS */ const char *zName; /* Name of this virtual file system */ void *pAppData; /* Pointer to application-specific data */ - int (*xOpen)(sqlite3_vfs*, const char *zName, sqlite3_file*, + int (*xOpen)(sqlite3_vfs*, sqlite3_filename zName, sqlite3_file*, int flags, int *pOutFlags); int (*xDelete)(sqlite3_vfs*, const char *zName, int syncDir); int (*xAccess)(sqlite3_vfs*, const char *zName, int flags, int *pResOut); @@ -1925,20 +2009,23 @@ SQLITE_API int sqlite3_os_end(void); ** must ensure that no other SQLite interfaces are invoked by other ** threads while sqlite3_config() is running. ** -** The sqlite3_config() interface -** may only be invoked prior to library initialization using -** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. -** ^If sqlite3_config() is called after [sqlite3_initialize()] and before -** [sqlite3_shutdown()] then it will return SQLITE_MISUSE. -** Note, however, that ^sqlite3_config() can be called as part of the -** implementation of an application-defined [sqlite3_os_init()]. -** ** The first argument to sqlite3_config() is an integer ** [configuration option] that determines ** what property of SQLite is to be configured. Subsequent arguments ** vary depending on the [configuration option] ** in the first argument. ** +** For most configuration options, the sqlite3_config() interface +** may only be invoked prior to library initialization using +** [sqlite3_initialize()] or after shutdown by [sqlite3_shutdown()]. +** The exceptional configuration options that may be invoked at any time +** are called "anytime configuration options". +** ^If sqlite3_config() is called after [sqlite3_initialize()] and before +** [sqlite3_shutdown()] with a first argument that is not an anytime +** configuration option, then the sqlite3_config() call will return SQLITE_MISUSE. +** Note, however, that ^sqlite3_config() can be called as part of the +** implementation of an application-defined [sqlite3_os_init()]. +** ** ^When a configuration option is set, sqlite3_config() returns [SQLITE_OK]. ** ^If the option is unknown or SQLite is unable to set the option ** then this routine returns a non-zero [error code]. @@ -2046,6 +2133,23 @@ struct sqlite3_mem_methods { ** These constants are the available integer configuration options that ** can be passed as the first argument to the [sqlite3_config()] interface. ** +** Most of the configuration options for sqlite3_config() +** will only work if invoked prior to [sqlite3_initialize()] or after +** [sqlite3_shutdown()]. The few exceptions to this rule are called +** "anytime configuration options". +** ^Calling [sqlite3_config()] with a first argument that is not an +** anytime configuration option in between calls to [sqlite3_initialize()] and +** [sqlite3_shutdown()] is a no-op that returns SQLITE_MISUSE. +** +** The set of anytime configuration options can change (by insertions +** and/or deletions) from one release of SQLite to the next. +** As of SQLite version 3.42.0, the complete set of anytime configuration +** options is: +**
      +**
    • SQLITE_CONFIG_LOG +**
    • SQLITE_CONFIG_PCACHE_HDRSZ +**
    +** ** New configuration options may be added in future releases of SQLite. ** Existing configuration options might be discontinued. Applications ** should check the return code from [sqlite3_config()] to make sure that @@ -2376,7 +2480,7 @@ struct sqlite3_mem_methods { ** is stored in each sorted record and the required column values loaded ** from the database as records are returned in sorted order. The default ** value for this option is to never use this optimization. Specifying a -** negative value for this option restores the default behaviour. +** negative value for this option restores the default behavior. ** This option is only available if SQLite is compiled with the ** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option. ** @@ -2390,30 +2494,46 @@ struct sqlite3_mem_methods { ** configuration setting is never used, then the default maximum is determined ** by the [SQLITE_MEMDB_DEFAULT_MAXSIZE] compile-time option. If that ** compile-time option is not set, then the default maximum is 1073741824. +** +** [[SQLITE_CONFIG_ROWID_IN_VIEW]] +**
    SQLITE_CONFIG_ROWID_IN_VIEW +**
The SQLITE_CONFIG_ROWID_IN_VIEW option enables or disables the ability +** for VIEWs to have a ROWID. The capability can only be enabled if SQLite is +** compiled with -DSQLITE_ALLOW_ROWID_IN_VIEW, in which case the capability +** defaults to on. This configuration option queries the current setting or +** changes the setting to off or on. The argument is a pointer to an integer. +** If that integer initially holds a value of 1, then the ability for VIEWs to +** have ROWIDs is activated. If the integer initially holds zero, then the +** ability is deactivated. Any other initial value for the integer leaves the +** setting unchanged. After changes, if any, the integer is written with +** a 1 or 0, if the ability for VIEWs to have ROWIDs is on or off. If SQLite +** is compiled without -DSQLITE_ALLOW_ROWID_IN_VIEW (which is the usual and +** recommended case) then the integer is always filled with zero, regardless +** of its initial value. ** */ -#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ -#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ -#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ -#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ -#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ -#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ -#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ -#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ -#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ -#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ -/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused. */ -#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ -#define SQLITE_CONFIG_PCACHE 14 /* no-op */ -#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ -#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ -#define SQLITE_CONFIG_URI 17 /* int */ -#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ -#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_SINGLETHREAD 1 /* nil */ +#define SQLITE_CONFIG_MULTITHREAD 2 /* nil */ +#define SQLITE_CONFIG_SERIALIZED 3 /* nil */ +#define SQLITE_CONFIG_MALLOC 4 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_GETMALLOC 5 /* sqlite3_mem_methods* */ +#define SQLITE_CONFIG_SCRATCH 6 /* No longer used */ +#define SQLITE_CONFIG_PAGECACHE 7 /* void*, int sz, int N */ +#define SQLITE_CONFIG_HEAP 8 /* void*, int nByte, int min */ +#define SQLITE_CONFIG_MEMSTATUS 9 /* boolean */ +#define SQLITE_CONFIG_MUTEX 10 /* sqlite3_mutex_methods* */ +#define SQLITE_CONFIG_GETMUTEX 11 /* sqlite3_mutex_methods* */ +/* previously SQLITE_CONFIG_CHUNKALLOC 12 which is now unused.
*/ +#define SQLITE_CONFIG_LOOKASIDE 13 /* int int */ +#define SQLITE_CONFIG_PCACHE 14 /* no-op */ +#define SQLITE_CONFIG_GETPCACHE 15 /* no-op */ +#define SQLITE_CONFIG_LOG 16 /* xFunc, void* */ +#define SQLITE_CONFIG_URI 17 /* int */ +#define SQLITE_CONFIG_PCACHE2 18 /* sqlite3_pcache_methods2* */ +#define SQLITE_CONFIG_GETPCACHE2 19 /* sqlite3_pcache_methods2* */ #define SQLITE_CONFIG_COVERING_INDEX_SCAN 20 /* int */ -#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ -#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ +#define SQLITE_CONFIG_SQLLOG 21 /* xSqllog, void* */ +#define SQLITE_CONFIG_MMAP_SIZE 22 /* sqlite3_int64, sqlite3_int64 */ #define SQLITE_CONFIG_WIN32_HEAPSIZE 23 /* int nByte */ #define SQLITE_CONFIG_PCACHE_HDRSZ 24 /* int *psz */ #define SQLITE_CONFIG_PMASZ 25 /* unsigned int szPma */ @@ -2421,6 +2541,7 @@ struct sqlite3_mem_methods { #define SQLITE_CONFIG_SMALL_MALLOC 27 /* boolean */ #define SQLITE_CONFIG_SORTERREF_SIZE 28 /* int nByte */ #define SQLITE_CONFIG_MEMDB_MAXSIZE 29 /* sqlite3_int64 */ +#define SQLITE_CONFIG_ROWID_IN_VIEW 30 /* int* */ /* ** CAPI3REF: Database Connection Configuration Options @@ -2454,7 +2575,7 @@ struct sqlite3_mem_methods { ** configuration for a database connection can only be changed when that ** connection is not currently using lookaside memory, or in other words ** when the "current value" returned by -** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero. +** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero. ** Any attempt to change the lookaside memory configuration when lookaside ** memory is in use leaves the configuration unchanged and returns ** [SQLITE_BUSY].)^
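The corrected cross-reference above can be exercised directly: read SQLITE_DBSTATUS_LOOKASIDE_USED and only reconfigure lookaside when nothing is in use. A sketch; the 512-byte/128-slot sizing and the helper name are arbitrary examples:

#include <sqlite3.h>

/* Resize the lookaside buffer only when the connection holds no
** outstanding lookaside allocations. */
static int maybe_resize_lookaside(sqlite3 *db){
  int nUsed = 0, nHiwtr = 0;
  int rc = sqlite3_db_status(db, SQLITE_DBSTATUS_LOOKASIDE_USED,
                             &nUsed, &nHiwtr, 0);
  if( rc!=SQLITE_OK ) return rc;
  if( nUsed>0 ) return SQLITE_BUSY;  /* reconfiguring now would fail */
  /* A NULL buffer asks SQLite to allocate the lookaside memory itself. */
  return sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, (void*)0, 512, 128);
}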
    @@ -2551,7 +2672,7 @@ struct sqlite3_mem_methods { ** database handle, SQLite checks if this will mean that there are now no ** connections at all to the database. If so, it performs a checkpoint ** operation before closing the connection. This option may be used to -** override this behaviour. The first parameter passed to this operation +** override this behavior. The first parameter passed to this operation ** is an integer - positive to disable checkpoints-on-close, or zero (the ** default) to enable them, and negative to leave the setting unchanged. ** The second parameter is a pointer to an integer @@ -2604,8 +2725,12 @@ struct sqlite3_mem_methods { **
  • sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0); ** ** Because resetting a database is destructive and irreversible, the -** process requires the use of this obscure API and multiple steps to help -** ensure that it does not happen by accident. +** process requires the use of this obscure API and multiple steps to +** help ensure that it does not happen by accident. Because this +** feature must be capable of resetting corrupt databases, and +** shutting down virtual tables may require access to that corrupt +** storage, the library must abandon any installed virtual tables +** without calling their xDestroy() methods. ** ** [[SQLITE_DBCONFIG_DEFENSIVE]]
    SQLITE_DBCONFIG_DEFENSIVE
    **
    The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the @@ -2616,6 +2741,7 @@ struct sqlite3_mem_methods { **
      **
    • The [PRAGMA writable_schema=ON] statement. **
    • The [PRAGMA journal_mode=OFF] statement. +**
    • The [PRAGMA schema_version=N] statement. **
    • Writes to the [sqlite_dbpage] virtual table. **
    • Direct writes to [shadow tables]. **
    @@ -2643,7 +2769,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_DQS_DML]] -**
    SQLITE_DBCONFIG_DQS_DML +**
    SQLITE_DBCONFIG_DQS_DML
    **
    The SQLITE_DBCONFIG_DQS_DML option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DML statements ** only, that is DELETE, INSERT, SELECT, and UPDATE statements. The @@ -2652,7 +2778,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_DQS_DDL]] -**
    SQLITE_DBCONFIG_DQS_DDL +**
    SQLITE_DBCONFIG_DQS_DDL
    **
The SQLITE_DBCONFIG_DQS_DDL option activates or deactivates ** the legacy [double-quoted string literal] misfeature for DDL statements, ** such as CREATE TABLE and CREATE INDEX. The @@ -2661,7 +2787,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_TRUSTED_SCHEMA]] -**
    SQLITE_DBCONFIG_TRUSTED_SCHEMA +**
    SQLITE_DBCONFIG_TRUSTED_SCHEMA
    **
    The SQLITE_DBCONFIG_TRUSTED_SCHEMA option tells SQLite to ** assume that database schemas are untainted by malicious content. ** When the SQLITE_DBCONFIG_TRUSTED_SCHEMA option is disabled, SQLite @@ -2681,7 +2807,7 @@ struct sqlite3_mem_methods { **
    ** ** [[SQLITE_DBCONFIG_LEGACY_FILE_FORMAT]] -**
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT +**
    SQLITE_DBCONFIG_LEGACY_FILE_FORMAT
    **
    The SQLITE_DBCONFIG_LEGACY_FILE_FORMAT option activates or deactivates ** the legacy file format flag. When activated, this flag causes all newly ** created database file to have a schema format version number (the 4-byte @@ -2690,7 +2816,7 @@ struct sqlite3_mem_methods { ** any SQLite version back to 3.0.0 ([dateof:3.0.0]). Without this setting, ** newly created databases are generally not understandable by SQLite versions ** prior to 3.3.0 ([dateof:3.3.0]). As these words are written, there -** is now scarcely any need to generated database files that are compatible +** is now scarcely any need to generate database files that are compatible ** all the way back to version 3.0.0, and so this setting is of little ** practical use, but is provided so that SQLite can continue to claim the ** ability to generate new database files that are compatible with version @@ -2699,8 +2825,40 @@ struct sqlite3_mem_methods { ** the [VACUUM] command will fail with an obscure error when attempting to ** process a table with generated columns and a descending index. This is ** not considered a bug since SQLite versions 3.3.0 and earlier do not support -** either generated columns or decending indexes. +** either generated columns or descending indexes. +**
    +** +** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]] +**
    SQLITE_DBCONFIG_STMT_SCANSTATUS
    +**
The SQLITE_DBCONFIG_STMT_SCANSTATUS option is only useful in +** SQLITE_ENABLE_STMT_SCANSTATUS builds. In this case, it sets or clears +** a flag that enables collection of the sqlite3_stmt_scanstatus_v2() +** statistics. For statistics to be collected, the flag must be set on +** the database handle both when the SQL statement is prepared and when it +** is stepped. The flag is set (collection of statistics is enabled) +** by default. This option takes two arguments: an integer and a pointer to +** an integer. The first argument is 1, 0, or -1 to enable, disable, or +** leave unchanged the statement scanstatus option. If the second argument +** is not NULL, then the value of the statement scanstatus setting after +** processing the first argument is written into the integer that the second +** argument points to. +**
    +** +** [[SQLITE_DBCONFIG_REVERSE_SCANORDER]] +**
    SQLITE_DBCONFIG_REVERSE_SCANORDER
    +**
    The SQLITE_DBCONFIG_REVERSE_SCANORDER option changes the default order +** in which tables and indexes are scanned so that the scans start at the end +** and work toward the beginning rather than starting at the beginning and +** working toward the end. Setting SQLITE_DBCONFIG_REVERSE_SCANORDER is the +** same as setting [PRAGMA reverse_unordered_selects]. This option takes +** two arguments which are an integer and a pointer to an integer. The first +** argument is 1, 0, or -1 to enable, disable, or leave unchanged the +** reverse scan order flag, respectively. If the second argument is not NULL, +** then 0 or 1 is written into the integer that the second argument points to +** depending on if the reverse scan order flag is set after processing the +** first argument. **
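Both new options above take the same (int, int*) argument pair, so they can be set and queried the same way. A brief sketch (return codes ignored for compactness; the helper name is illustrative):

#include <sqlite3.h>

/* Turn on reverse scan order, then query the statement-scanstatus flag
** without changing it (-1 leaves the setting untouched). */
static void configure_scans(sqlite3 *db){
  int bReverse = 0, bScanStat = 0;
  sqlite3_db_config(db, SQLITE_DBCONFIG_REVERSE_SCANORDER, 1, &bReverse);
  sqlite3_db_config(db, SQLITE_DBCONFIG_STMT_SCANSTATUS, -1, &bScanStat);
  /* On success bReverse==1; bScanStat reports the current setting in
  ** SQLITE_ENABLE_STMT_SCANSTATUS builds. */
}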
    +** ** */ #define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */ @@ -2721,7 +2879,9 @@ struct sqlite3_mem_methods { #define SQLITE_DBCONFIG_ENABLE_VIEW 1015 /* int int* */ #define SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016 /* int int* */ #define SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017 /* int int* */ -#define SQLITE_DBCONFIG_MAX 1017 /* Largest DBCONFIG */ +#define SQLITE_DBCONFIG_STMT_SCANSTATUS 1018 /* int int* */ +#define SQLITE_DBCONFIG_REVERSE_SCANORDER 1019 /* int int* */ +#define SQLITE_DBCONFIG_MAX 1019 /* Largest DBCONFIG */ /* ** CAPI3REF: Enable Or Disable Extended Result Codes @@ -2943,8 +3103,13 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*); ** ^A call to sqlite3_interrupt(D) that occurs when there are no running ** SQL statements is a no-op and has no effect on SQL statements ** that are started after the sqlite3_interrupt() call returns. +** +** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether +** or not an interrupt is currently in effect for [database connection] D. +** It returns 1 if an interrupt is currently in effect, or 0 otherwise. */ SQLITE_API void sqlite3_interrupt(sqlite3*); +SQLITE_API int sqlite3_is_interrupted(sqlite3*); /* ** CAPI3REF: Determine If An SQL Statement Is Complete @@ -3562,8 +3727,8 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, **
    ^An SQLITE_TRACE_PROFILE callback provides approximately the same ** information as is provided by the [sqlite3_profile()] callback. ** ^The P argument is a pointer to the [prepared statement] and the -** X argument points to a 64-bit integer which is the estimated of -** the number of nanosecond that the prepared statement took to run. +** X argument points to a 64-bit integer which is approximately +** the number of nanoseconds that the prepared statement took to run. ** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes. ** ** [[SQLITE_TRACE_ROW]]
    SQLITE_TRACE_ROW
    @@ -3595,8 +3760,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*, ** M argument should be the bitwise OR-ed combination of ** zero or more [SQLITE_TRACE] constants. ** -** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides -** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2(). +** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P) +** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or +** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each +** database connection may have at most one trace callback. ** ** ^The X callback is invoked whenever any of the events identified by ** mask M occur. ^The integer return value from the callback is currently @@ -3626,7 +3793,7 @@ SQLITE_API int sqlite3_trace_v2( ** ** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback ** function X to be invoked periodically during long running calls to -** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for +** [sqlite3_step()] and [sqlite3_prepare()] and similar for ** database connection D. An example use for this ** interface is to keep a GUI updated during a large query. ** @@ -3651,6 +3818,13 @@ SQLITE_API int sqlite3_trace_v2( ** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their ** database connections for the meaning of "modify" in this paragraph. ** +** The progress handler callback would originally only be invoked from the +** bytecode engine. It still might be invoked during [sqlite3_prepare()] +** and similar because those routines might force a reparse of the schema +** which involves running the bytecode engine. However, beginning with +** SQLite version 3.41.0, the progress handler callback might also be +** invoked directly from [sqlite3_prepare()] while analyzing and generating +** code for complex queries. */ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); @@ -3687,13 +3861,18 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** **
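A sketch of the progress-handler pattern just declared; the 1000-opcode period and the global cancellation flag are arbitrary illustrative choices, and a non-zero return interrupts the running statement much as sqlite3_interrupt() would:

#include <sqlite3.h>

static volatile int g_cancel = 0;  /* set from another thread to cancel */

/* Returning non-zero causes the active statement to be interrupted. */
static int progress_cb(void *pArg){
  (void)pArg;
  return g_cancel;
}

static void install_progress_handler(sqlite3 *db){
  /* Invoke progress_cb roughly every 1000 virtual-machine opcodes. */
  sqlite3_progress_handler(db, 1000, progress_cb, 0);
}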
    ** ^(
    [SQLITE_OPEN_READONLY]
    -**
    The database is opened in read-only mode. If the database does not -** already exist, an error is returned.
    )^ +**
    The database is opened in read-only mode. If the database does +** not already exist, an error is returned.
    )^ ** ** ^(
    [SQLITE_OPEN_READWRITE]
    -**
    The database is opened for reading and writing if possible, or reading -** only if the file is write protected by the operating system. In either -** case the database must already exist, otherwise an error is returned.
    )^ +**
    The database is opened for reading and writing if possible, or +** reading only if the file is write protected by the operating +** system. In either case the database must already exist, otherwise +** an error is returned. For historical reasons, if opening in +** read-write mode fails due to OS-level permissions, an attempt is +** made to open it in read-only mode. [sqlite3_db_readonly()] can be +** used to determine whether the database is actually +** read-write.
    )^ ** ** ^(
    [SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]
    **
    The database is opened for reading and writing, and is created if @@ -3731,6 +3910,9 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); **
    The database is opened [shared cache] enabled, overriding ** the default shared cache setting provided by ** [sqlite3_enable_shared_cache()].)^ +** The [use of shared cache mode is discouraged] and hence shared cache +** capabilities may be omitted from many builds of SQLite. In such cases, +** this option is a no-op. ** ** ^(
    [SQLITE_OPEN_PRIVATECACHE]
    **
    The database is opened [shared cache] disabled, overriding @@ -3746,7 +3928,7 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*); ** to return an extended result code.
    ** ** [[OPEN_NOFOLLOW]] ^(
    [SQLITE_OPEN_NOFOLLOW]
    -**
    The database filename is not allowed to be a symbolic link
    +**
    The database filename is not allowed to contain a symbolic link
    **
    )^ ** ** If the 3rd parameter to sqlite3_open_v2() is not one of the @@ -3950,7 +4132,7 @@ SQLITE_API int sqlite3_open_v2( ** as F) must be one of: **
      **
    • A database filename pointer created by the SQLite core and -** passed into the xOpen() method of a VFS implemention, or +** passed into the xOpen() method of a VFS implementation, or **
    • A filename obtained from [sqlite3_db_filename()], or **
    • A new filename constructed using [sqlite3_create_filename()]. **
@@ -4005,10 +4187,10 @@ SQLITE_API int sqlite3_open_v2( ** ** See the [URI filename] documentation for additional information. */ -SQLITE_API const char *sqlite3_uri_parameter(const char *zFilename, const char *zParam); -SQLITE_API int sqlite3_uri_boolean(const char *zFile, const char *zParam, int bDefault); -SQLITE_API sqlite3_int64 sqlite3_uri_int64(const char*, const char*, sqlite3_int64); -SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N); +SQLITE_API const char *sqlite3_uri_parameter(sqlite3_filename z, const char *zParam); +SQLITE_API int sqlite3_uri_boolean(sqlite3_filename z, const char *zParam, int bDefault); +SQLITE_API sqlite3_int64 sqlite3_uri_int64(sqlite3_filename, const char*, sqlite3_int64); +SQLITE_API const char *sqlite3_uri_key(sqlite3_filename z, int N); /* ** CAPI3REF: Translate filenames @@ -4037,9 +4219,9 @@ SQLITE_API const char *sqlite3_uri_key(const char *zFilename, int N); ** return value from [sqlite3_db_filename()], then the result is ** undefined and is likely a memory access violation. */ -SQLITE_API const char *sqlite3_filename_database(const char*); -SQLITE_API const char *sqlite3_filename_journal(const char*); -SQLITE_API const char *sqlite3_filename_wal(const char*); +SQLITE_API const char *sqlite3_filename_database(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_journal(sqlite3_filename); +SQLITE_API const char *sqlite3_filename_wal(sqlite3_filename); /* ** CAPI3REF: Database File Corresponding To A Journal @@ -4063,7 +4245,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); /* ** CAPI3REF: Create and Destroy VFS Filenames ** -** These interfces are provided for use by [VFS shim] implementations and +** These interfaces are provided for use by [VFS shim] implementations and ** are not useful outside of that context. ** ** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of @@ -4105,14 +4287,14 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*); ** then the corresponding [sqlite3_module.xClose() method should also be ** invoked prior to calling sqlite3_free_filename(Y). */ -SQLITE_API char *sqlite3_create_filename( +SQLITE_API sqlite3_filename sqlite3_create_filename( const char *zDatabase, const char *zJournal, const char *zWal, int nParam, const char **azParam ); -SQLITE_API void sqlite3_free_filename(char*); +SQLITE_API void sqlite3_free_filename(sqlite3_filename); /* ** CAPI3REF: Error Codes And Messages @@ -4142,14 +4324,17 @@ SQLITE_API void sqlite3_free_filename(char*); ** ** ** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language -** text that describes the error, as either UTF-8 or UTF-16 respectively. +** text that describes the error, as either UTF-8 or UTF-16 respectively, +** or NULL if no error message is available. +** (See how SQLite handles [invalid UTF] for exceptions to this rule.) ** ^(Memory to hold the error message string is managed internally. ** The application does not need to worry about freeing the result. ** However, the error string might be overwritten or deallocated by ** subsequent calls to other SQLite interface functions.)^ ** -** ^The sqlite3_errstr() interface returns the English-language text -** that describes the [result code], as UTF-8. +** ^The sqlite3_errstr(E) interface returns the English-language text +** that describes the [result code] E, as UTF-8, or NULL if E is not a +** result code for which a text error message is available.
** ^(Memory to hold the error message string is managed internally ** and must not be freed by the application)^. ** @@ -4610,6 +4795,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt); */ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt); +/* +** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement +** METHOD: sqlite3_stmt +** +** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN +** setting for [prepared statement] S. If E is zero, then S becomes +** a normal prepared statement. If E is 1, then S behaves as if +** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if +** its SQL text began with "[EXPLAIN QUERY PLAN]". +** +** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared. +** SQLite tries to avoid a reprepare, but a reprepare might be necessary +** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode. +** +** Because of the potential need to reprepare, a call to +** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be +** reprepared because it was created using [sqlite3_prepare()] instead of +** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and +** hence has no saved SQL text with which to reprepare. +** +** Changing the explain setting for a prepared statement does not change +** the original SQL text for the statement. Hence, if the SQL text originally +** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0) +** is called to convert the statement into an ordinary statement, the EXPLAIN +** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S) +** output, even though the statement now acts like a normal SQL statement. +** +** This routine returns SQLITE_OK if the explain mode is successfully +** changed, or an error code if the explain mode could not be changed. +** The explain mode cannot be changed while a statement is active. +** Hence, it is good practice to call [sqlite3_reset(S)] +** immediately prior to calling sqlite3_stmt_explain(S,E). +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode); + /* ** CAPI3REF: Determine If A Prepared Statement Has Been Reset ** METHOD: sqlite3_stmt @@ -4773,7 +4993,7 @@ typedef struct sqlite3_context sqlite3_context; ** with it may be passed. ^It is called to dispose of the BLOB or string even ** if the call to the bind API fails, except the destructor is not called if ** the third parameter is a NULL pointer or the fourth parameter is negative. -** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that +** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that ** the application remains responsible for disposing of the object. ^In this ** case, the object and the provided pointer to it must remain valid until ** either the prepared statement is finalized or the same SQL parameter is @@ -5452,20 +5672,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt); ** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S ** back to the beginning of its program. ** -** ^If the most recent call to [sqlite3_step(S)] for the -** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE], -** or if [sqlite3_step(S)] has never before been called on S, -** then [sqlite3_reset(S)] returns [SQLITE_OK]. +** ^The return code from [sqlite3_reset(S)] indicates whether or not +** the previous evaluation of prepared statement S completed successfully. 
+** ^If [sqlite3_step(S)] has never before been called on S or if +** [sqlite3_step(S)] has not been called since the previous call +** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return +** [SQLITE_OK]. ** ** ^If the most recent call to [sqlite3_step(S)] for the ** [prepared statement] S indicated an error, then ** [sqlite3_reset(S)] returns an appropriate [error code]. +** ^The [sqlite3_reset(S)] interface might also return an [error code] +** if there were no prior errors but the process of resetting +** the prepared statement caused a new error. ^For example, if an +** [INSERT] statement with a [RETURNING] clause is only stepped one time, +** that one call to [sqlite3_step(S)] might return SQLITE_ROW but +** the overall statement might still fail and the [sqlite3_reset(S)] call +** might return SQLITE_BUSY if locking constraints prevent the +** database change from committing. Therefore, it is important that +** applications check the return code from [sqlite3_reset(S)] even if +** no prior call to [sqlite3_step(S)] indicated a problem. ** ** ^The [sqlite3_reset(S)] interface does not change the values ** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S. */ SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt); + /* ** CAPI3REF: Create Or Redefine SQL Functions ** KEYWORDS: {function creation routines} @@ -5671,10 +5904,21 @@ SQLITE_API int sqlite3_create_window_function( ** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in ** schema structures such as [CHECK constraints], [DEFAULT clauses], ** [expression indexes], [partial indexes], or [generated columns]. -** The SQLITE_DIRECTONLY flags is a security feature which is recommended -** for all [application-defined SQL functions], and especially for functions -** that have side-effects or that could potentially leak sensitive -** information. +**

    +** The SQLITE_DIRECTONLY flag is recommended for any +** [application-defined SQL function] +** that has side-effects or that could potentially leak sensitive information. +** This will prevent attacks in which an application is tricked +** into using a database file that has had its schema surreptitiously +** modified to invoke the application-defined function in ways that are +** harmful. +**

    +** Some people say it is good practice to set SQLITE_DIRECTONLY on all +** [application-defined SQL functions], regardless of whether or not they +** are security sensitive, as doing so prevents those functions from being used +** inside of the database schema, and thus ensures that the database +** can be inspected and modified using generic tools (such as the [CLI]) +** that do not have access to the application-defined functions. **

  • ** ** [[SQLITE_INNOCUOUS]]
    SQLITE_INNOCUOUS
    @@ -5701,13 +5945,27 @@ SQLITE_API int sqlite3_create_window_function( **
    ** ** [[SQLITE_SUBTYPE]]
    SQLITE_SUBTYPE
-** The SQLITE_SUBTYPE flag indicates to SQLite that a function may call +** The SQLITE_SUBTYPE flag indicates to SQLite that a function might call ** [sqlite3_value_subtype()] to inspect the sub-types of its arguments. -** Specifying this flag makes no difference for scalar or aggregate user -** functions. However, if it is not specified for a user-defined window -** function, then any sub-types belonging to arguments passed to the window -** function may be discarded before the window function is called (i.e. -** sqlite3_value_subtype() will always return 0). +** This flag instructs SQLite to omit some corner-case optimizations that +** might disrupt the operation of the [sqlite3_value_subtype()] function, +** causing it to return zero rather than the correct subtype(). +** SQL functions that invoke [sqlite3_value_subtype()] should have this +** property. If the SQLITE_SUBTYPE property is omitted, then the return +** value from [sqlite3_value_subtype()] might sometimes be zero even though +** a non-zero subtype was specified by the function argument expression. +** +** [[SQLITE_RESULT_SUBTYPE]]
    SQLITE_RESULT_SUBTYPE
+** The SQLITE_RESULT_SUBTYPE flag indicates to SQLite that a function might call +** [sqlite3_result_subtype()] to cause a sub-type to be associated with its +** result. +** Every function that invokes [sqlite3_result_subtype()] should have this +** property. If it does not, then the call to [sqlite3_result_subtype()] +** might become a no-op if the function is used as a term in an +** [expression index]. On the other hand, SQL functions that never invoke +** [sqlite3_result_subtype()] should avoid setting this property, as the +** purpose of this property is to disable certain optimizations that are +** incompatible with subtypes. **
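Taken together, the SQLITE_SUBTYPE and SQLITE_RESULT_SUBTYPE rules above mean that a function which both reads and sets subtypes should be registered with both flags. A hypothetical sketch; carry_subtype and its behavior are illustrative only:

#include <sqlite3.h>

/* Hypothetical scalar that returns its argument while preserving the
** argument's subtype on the result. */
static void subtypeFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  unsigned int st = sqlite3_value_subtype(argv[0]); /* needs SQLITE_SUBTYPE */
  (void)argc;
  sqlite3_result_value(ctx, argv[0]);
  sqlite3_result_subtype(ctx, st);       /* needs SQLITE_RESULT_SUBTYPE */
}

static int register_carry_subtype(sqlite3 *db){
  return sqlite3_create_function(db, "carry_subtype", 1,
      SQLITE_UTF8|SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE,
      0, subtypeFunc, 0, 0);
}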
** */ @@ -5715,6 +5973,7 @@ SQLITE_API int sqlite3_create_window_function( #define SQLITE_DIRECTONLY 0x000080000 #define SQLITE_SUBTYPE 0x000100000 #define SQLITE_INNOCUOUS 0x000200000 +#define SQLITE_RESULT_SUBTYPE 0x001000000 /* ** CAPI3REF: Deprecated Functions @@ -5880,6 +6139,28 @@ SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*); SQLITE_API int sqlite3_value_nochange(sqlite3_value*); SQLITE_API int sqlite3_value_frombind(sqlite3_value*); +/* +** CAPI3REF: Report the internal text encoding state of an sqlite3_value object +** METHOD: sqlite3_value +** +** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8], +** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding +** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X) +** returns something other than SQLITE_TEXT, then the return value from +** sqlite3_value_encoding(X) is meaningless. ^Calls to +** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)], +** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or +** [sqlite3_value_bytes16(X)] might change the encoding of the value X and +** thus change the return from subsequent calls to sqlite3_value_encoding(X). +** +** This routine is intended for use by applications that test and validate +** the SQLite implementation. This routine is inquiring about the opaque +** internal state of an [sqlite3_value] object. Ordinary applications should +** not need to know what the internal state of an sqlite3_value object is and +** hence should not need to use this interface. +*/ +SQLITE_API int sqlite3_value_encoding(sqlite3_value*); + /* ** CAPI3REF: Finding The Subtype Of SQL Values ** METHOD: sqlite3_value @@ -5889,6 +6170,12 @@ SQLITE_API int sqlite3_value_frombind(sqlite3_value*); ** information can be used to pass a limited amount of context from ** one SQL function to another. Use the [sqlite3_result_subtype()] ** routine to set the subtype for the return value of an SQL function. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_SUBTYPE] property in the text +** encoding argument when the function is [sqlite3_create_function|registered]. +** If the [SQLITE_SUBTYPE] property is omitted, then sqlite3_value_subtype() +** might return zero instead of the upstream subtype in some corner cases. +*/ SQLITE_API unsigned int sqlite3_value_subtype(sqlite3_value*); @@ -5932,7 +6219,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value*); ** ** ^The sqlite3_aggregate_context(C,N) routine returns a NULL pointer ** when first called if N is less than or equal to zero or if a memory -** allocate error occurs. +** allocation error occurs. ** ** ^(The amount of space allocated by sqlite3_aggregate_context(C,N) is ** determined by the N parameter on first successful call. Changing the @@ -5987,48 +6274,56 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); ** METHOD: sqlite3_context ** ** These functions may be used by (non-aggregate) SQL functions to -** associate metadata with argument values. If the same value is passed to -** multiple invocations of the same SQL function during query execution, under -** some circumstances the associated metadata may be preserved. An example -** of where this might be useful is in a regular-expression matching -** function. The compiled version of the regular expression can be stored as -** metadata associated with the pattern string. +** associate auxiliary data with argument values.
If the same argument +** value is passed to multiple invocations of the same SQL function during +** query execution, under some circumstances the associated auxiliary data +** might be preserved. An example of where this might be useful is in a +** regular-expression matching function. The compiled version of the regular +** expression can be stored as auxiliary data associated with the pattern string. ** Then as long as the pattern string remains the same, ** the compiled regular expression can be reused on multiple ** invocations of the same function. ** -** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata +** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data ** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument ** value to the application-defined function. ^N is zero for the left-most -** function argument. ^If there is no metadata +** function argument. ^If there is no auxiliary data ** associated with the function argument, the sqlite3_get_auxdata(C,N) interface ** returns a NULL pointer. ** -** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th -** argument of the application-defined function. ^Subsequent +** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the +** N-th argument of the application-defined function. ^Subsequent ** calls to sqlite3_get_auxdata(C,N) return P from the most recent -** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or -** NULL if the metadata has been discarded. +** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or +** NULL if the auxiliary data has been discarded. ** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL, ** SQLite will invoke the destructor function X with parameter P exactly -** once, when the metadata is discarded. -** SQLite is free to discard the metadata at any time, including:
      +** once, when the auxiliary data is discarded. +** SQLite is free to discard the auxiliary data at any time, including:
        **
      • ^(when the corresponding function parameter changes)^, or **
      • ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the ** SQL statement)^, or **
      • ^(when sqlite3_set_auxdata() is invoked again on the same ** parameter)^, or **
      • ^(during the original sqlite3_set_auxdata() call when a memory -** allocation error occurs.)^
      +** allocation error occurs.)^ +**
    • ^(during the original sqlite3_set_auxdata() call if the function +** is evaluated during query planning instead of during query execution, +** as sometimes happens with [SQLITE_ENABLE_STAT4].)^
    ** -** Note the last bullet in particular. The destructor X in +** Note the last two bullets in particular. The destructor X in ** sqlite3_set_auxdata(C,N,P,X) might be called immediately, before the ** sqlite3_set_auxdata() interface even returns. Hence sqlite3_set_auxdata() ** should be called near the end of the function implementation and the ** function implementation should not make any use of P after -** sqlite3_set_auxdata() has been called. -** -** ^(In practice, metadata is preserved between function calls for +** sqlite3_set_auxdata() has been called. Furthermore, a call to +** sqlite3_get_auxdata() that occurs immediately after a corresponding call +** to sqlite3_set_auxdata() might still return NULL if an out-of-memory +** condition occurred during the sqlite3_set_auxdata() call or if the +** function is being evaluated during query planning rather than during +** query execution. +** +** ^(In practice, auxiliary data is preserved between function calls for ** function parameters that are compile-time constants, including literal ** values and [parameters] and expressions composed from the same.)^ ** @@ -6038,10 +6333,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*); ** ** These routines must be called from the same thread in which ** the SQL function is running. +** +** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()]. */ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N); SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*)); +/* +** CAPI3REF: Database Connection Client Data +** METHOD: sqlite3 +** +** These functions are used to associate one or more named pointers +** with a [database connection]. +** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P +** to be attached to [database connection] D using name N. Subsequent +** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P +** or a NULL pointer if there were no prior calls to +** sqlite3_set_clientdata() with the same values of D and N. +** Names are compared using strcmp() and are thus case sensitive. +** +** If P and X are both non-NULL, then the destructor X is invoked with +** argument P on the first of the following occurrences: +**
      +**
    • An out-of-memory error occurs during the call to +** sqlite3_set_clientdata() which attempts to register pointer P. +**
    • A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made +** with the same D and N parameters. +**
    • The database connection closes. SQLite does not make any guarantees +** about the order in which destructors are called, only that all +** destructors will be called exactly once at some point during the +** database connection closing process. +**
+** +** SQLite does not do anything with client data other than invoke +** destructors on the client data at the appropriate time. The intended +** use for client data is to provide a mechanism for wrapper libraries +** to store additional information about an SQLite database connection. +** +** There is no limit (other than available memory) on the number of different +** client data pointers (with different names) that can be attached to a +** single database connection. However, the implementation is optimized +** for the case of having only one or two different client data names. +** Applications and wrapper libraries are discouraged from using more than +** one client data name each. +** +** There is no way to enumerate the client data pointers +** associated with a database connection. The N parameter can be thought +** of as a secret key such that only code that knows the secret key is able +** to access the associated data. +** +** Security Warning: These interfaces should not be exposed in scripting +** languages or in other circumstances where it might be possible for an +** attacker to invoke them. Any agent that can invoke these interfaces +** can probably also take control of the process. +** +** Database connection client data is only available for SQLite +** version 3.44.0 ([dateof:3.44.0]) and later. +** +** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()]. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*); +SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*)); /* ** CAPI3REF: Constants Defining Special Destructor Behavior @@ -6137,9 +6489,10 @@ typedef void (*sqlite3_destructor_type)(void*); ** of [SQLITE_UTF8], [SQLITE_UTF16], [SQLITE_UTF16BE], or [SQLITE_UTF16LE]. ** ^SQLite takes the text result from the application from ** the 2nd parameter of the sqlite3_result_text* interfaces. -** ^If the 3rd parameter to the sqlite3_result_text* interfaces -** is negative, then SQLite takes result text from the 2nd parameter -** through the first zero character. +** ^If the 3rd parameter to any of the sqlite3_result_text* interfaces +** other than sqlite3_result_text64() is negative, then SQLite computes +** the string length itself by searching the 2nd parameter for the first +** zero character. ** ^If the 3rd parameter to the sqlite3_result_text* interfaces ** is non-negative, then as many bytes (not characters) of the text ** pointed to by the 2nd parameter are taken as the application-defined @@ -6242,6 +6595,20 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context*, sqlite3_uint64 n); ** higher order bits are discarded. ** The number of subtype bytes preserved by SQLite might increase ** in future releases of SQLite. +** +** Every [application-defined SQL function] that invokes this interface +** should include the [SQLITE_RESULT_SUBTYPE] property in its +** text encoding argument when the SQL function is +** [sqlite3_create_function|registered]. If the [SQLITE_RESULT_SUBTYPE] +** property is omitted from the function that invokes sqlite3_result_subtype(), +** then in some cases sqlite3_result_subtype() might fail to set +** the result subtype. +** +** If SQLite is compiled with -DSQLITE_STRICT_SUBTYPE=1, then any +** SQL function that invokes the sqlite3_result_subtype() interface +** and that does not have the SQLITE_RESULT_SUBTYPE property will raise +** an error. Future versions of SQLite might enable -DSQLITE_STRICT_SUBTYPE=1 +** by default.
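+**
+** As an illustrative sketch (not from the upstream header), a pair of
+** cooperating functions might be registered as follows, where taggerFunc
+** and readerFunc are hypothetical application callbacks:
+**
+**    // tagger() calls sqlite3_result_subtype() and therefore needs
+**    // the SQLITE_RESULT_SUBTYPE property:
+**    rc = sqlite3_create_function(db, "tagger", 1,
+**             SQLITE_UTF8|SQLITE_RESULT_SUBTYPE, 0, taggerFunc, 0, 0);
+**    // reader() calls sqlite3_value_subtype() on its argument and
+**    // therefore needs the SQLITE_SUBTYPE property:
+**    rc = sqlite3_create_function(db, "reader", 1,
+**             SQLITE_UTF8|SQLITE_SUBTYPE, 0, readerFunc, 0, 0);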
*/ SQLITE_API void sqlite3_result_subtype(sqlite3_context*,unsigned int); @@ -6473,6 +6840,13 @@ SQLITE_API void sqlite3_activate_cerod( ** of the default VFS is not implemented correctly, or not implemented at ** all, then the behavior of sqlite3_sleep() may deviate from the description ** in the previous paragraphs. +** +** If a negative argument is passed to sqlite3_sleep(), the results vary by +** VFS and operating system. Some systems treat a negative argument as an +** instruction to sleep forever. Others understand it to mean do not sleep +** at all. ^In SQLite version 3.42.0 and later, a negative +** argument passed into sqlite3_sleep() is changed to zero before it is relayed +** down into the xSleep method of the VFS. */ SQLITE_API int sqlite3_sleep(int); @@ -6695,7 +7069,7 @@ SQLITE_API const char *sqlite3_db_name(sqlite3 *db, int N); **
  • [sqlite3_filename_wal()] ** */ -SQLITE_API const char *sqlite3_db_filename(sqlite3 *db, const char *zDbName); +SQLITE_API sqlite3_filename sqlite3_db_filename(sqlite3 *db, const char *zDbName); /* ** CAPI3REF: Determine if a database is read-only @@ -6726,7 +7100,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName); SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema); /* -** CAPI3REF: Allowed return values from [sqlite3_txn_state()] +** CAPI3REF: Allowed return values from sqlite3_txn_state() ** KEYWORDS: {transaction state} ** ** These constants define the current transaction state of a database file. @@ -6832,7 +7206,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** function C that is invoked prior to each autovacuum of the database ** file. ^The callback is passed a copy of the generic data pointer (P), ** the schema-name of the attached database that is being autovacuumed, -** the the size of the database file in pages, the number of free pages, +** the size of the database file in pages, the number of free pages, ** and the number of bytes per page, respectively. The callback should ** return the number of free pages that should be removed by the ** autovacuum. ^If the callback returns zero, then no autovacuum happens. @@ -6858,7 +7232,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*); ** ^Each call to the sqlite3_autovacuum_pages() interface overrides all ** previous invocations for that database connection. ^If the callback ** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer, -** then the autovacuum steps callback is cancelled. The return value +** then the autovacuum steps callback is canceled. The return value ** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might ** be some other error code if something goes wrong. The current ** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other @@ -6953,6 +7327,11 @@ SQLITE_API void *sqlite3_update_hook( ** to the same database. Sharing is enabled if the argument is true ** and disabled if the argument is false.)^ ** +** This interface is omitted if SQLite is compiled with +** [-DSQLITE_OMIT_SHARED_CACHE]. The [-DSQLITE_OMIT_SHARED_CACHE] +** compile-time option is recommended because the +** [use of shared cache mode is discouraged]. +** ** ^Cache sharing is enabled and disabled for an entire process. ** This is a change as of SQLite [version 3.5.0] ([dateof:3.5.0]). ** In prior versions of SQLite, @@ -7051,7 +7430,7 @@ SQLITE_API int sqlite3_db_release_memory(sqlite3*); ** ^The soft heap limit may not be greater than the hard heap limit. ** ^If the hard heap limit is enabled and if sqlite3_soft_heap_limit(N) ** is invoked with a value of N that is greater than the hard heap limit, -** the the soft heap limit is set to the value of the hard heap limit. +** the soft heap limit is set to the value of the hard heap limit. ** ^The soft heap limit is automatically enabled whenever the hard heap ** limit is enabled. ^When sqlite3_hard_heap_limit64(N) is invoked and ** the soft heap limit is outside the range of 1..N, then the soft heap @@ -7312,15 +7691,6 @@ SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void)); */ SQLITE_API void sqlite3_reset_auto_extension(void); -/* -** The interface to the virtual-table mechanism is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. 
-** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - /* ** Structures used by the virtual table interface */ @@ -7381,6 +7751,10 @@ struct sqlite3_module { /* The methods above are in versions 1 and 2 of the sqlite_module object. ** Those below are for version 3 and greater. */ int (*xShadowName)(const char*); + /* The methods above are in versions 1 through 3 of the sqlite_module object. + ** Those below are for version 4 and greater. */ + int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema, + const char *zTabName, int mFlags, char **pzErr); }; /* @@ -7439,10 +7813,10 @@ struct sqlite3_module { ** when the omit flag is true there is no guarantee that the constraint will ** not be checked again using byte code.)^ ** -** ^The idxNum and idxPtr values are recorded and passed into the +** ^The idxNum and idxStr values are recorded and passed into the ** [xFilter] method. -** ^[sqlite3_free()] is used to free idxPtr if and only if -** needToFreeIdxPtr is true. +** ^[sqlite3_free()] is used to free idxStr if and only if +** needToFreeIdxStr is true. ** ** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in ** the correct order to satisfy the ORDER BY clause so that no separate @@ -7562,7 +7936,7 @@ struct sqlite3_index_info { ** the [sqlite3_vtab_collation()] interface. For most real-world virtual ** tables, the collating sequence of constraints does not matter (for example ** because the constraints are numeric) and so the sqlite3_vtab_collation() -** interface is no commonly needed. +** interface is not commonly needed. */ #define SQLITE_INDEX_CONSTRAINT_EQ 2 #define SQLITE_INDEX_CONSTRAINT_GT 4 @@ -7721,16 +8095,6 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL); */ SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg); -/* -** The interface to the virtual-table mechanism defined above (back up -** to a comment remarkably similar to this one) is currently considered -** to be experimental. The interface might change in incompatible ways. -** If this is a problem for you, do not use the interface at this time. -** -** When the virtual-table mechanism stabilizes, we will declare the -** interface fixed, support it indefinitely, and remove this comment. -*/ - /* ** CAPI3REF: A Handle To An Open BLOB ** KEYWORDS: {BLOB handle} {BLOB handles} @@ -7878,7 +8242,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64); ** code is returned and the transaction rolled back. ** ** Calling this function with an argument that is not a NULL pointer or an -** open blob handle results in undefined behaviour. ^Calling this routine +** open blob handle results in undefined behavior. ^Calling this routine ** with a null pointer (such as would be returned by a failed call to ** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function ** is passed a valid open blob handle, the values returned by the @@ -8105,18 +8469,20 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs*); ** ** ^(Some systems (for example, Windows 95) do not support the operation ** implemented by sqlite3_mutex_try(). On those systems, sqlite3_mutex_try() -** will always return SQLITE_BUSY. The SQLite core only ever uses -** sqlite3_mutex_try() as an optimization so this is acceptable -** behavior.)^ +** will always return SQLITE_BUSY. 
In most cases the SQLite core only uses +** sqlite3_mutex_try() as an optimization, so this is acceptable +** behavior. The exceptions are unix builds that set the +** SQLITE_ENABLE_SETLK_TIMEOUT build option. In that case a working +** sqlite3_mutex_try() is required.)^ ** ** ^The sqlite3_mutex_leave() routine exits a mutex that was ** previously entered by the same thread. The behavior ** is undefined if the mutex is not currently entered by the ** calling thread or is not currently allocated. ** -** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), or -** sqlite3_mutex_leave() is a NULL pointer, then all three routines -** behave as no-ops. +** ^If the argument to sqlite3_mutex_enter(), sqlite3_mutex_try(), +** sqlite3_mutex_leave(), or sqlite3_mutex_free() is a NULL pointer, +** then any of the four routines behaves as a no-op. ** ** See also: [sqlite3_mutex_held()] and [sqlite3_mutex_notheld()]. */ @@ -8358,6 +8724,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_PRNG_SAVE 5 #define SQLITE_TESTCTRL_PRNG_RESTORE 6 #define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */ +#define SQLITE_TESTCTRL_FK_NO_ACTION 7 #define SQLITE_TESTCTRL_BITVEC_TEST 8 #define SQLITE_TESTCTRL_FAULT_INSTALL 9 #define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10 @@ -8365,6 +8732,7 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_ASSERT 12 #define SQLITE_TESTCTRL_ALWAYS 13 #define SQLITE_TESTCTRL_RESERVE 14 /* NOT USED */ +#define SQLITE_TESTCTRL_JSON_SELFCHECK 14 #define SQLITE_TESTCTRL_OPTIMIZATIONS 15 #define SQLITE_TESTCTRL_ISKEYWORD 16 /* NOT USED */ #define SQLITE_TESTCTRL_SCRATCHMALLOC 17 /* NOT USED */ @@ -8386,7 +8754,8 @@ SQLITE_API int sqlite3_test_control(int op, ...); #define SQLITE_TESTCTRL_TRACEFLAGS 31 #define SQLITE_TESTCTRL_TUNE 32 #define SQLITE_TESTCTRL_LOGEST 33 -#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */ +#define SQLITE_TESTCTRL_USELONGDOUBLE 34 +#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */ /* ** CAPI3REF: SQL Keyword Checking @@ -9346,7 +9715,7 @@ typedef struct sqlite3_backup sqlite3_backup; ** if the application incorrectly accesses the destination [database connection] ** and so no error code is reported, but the operations may malfunction ** nevertheless. Use of the destination database connection while a -** backup is in progress might also also cause a mutex deadlock. +** backup is in progress might also cause a mutex deadlock. ** ** If running in [shared cache mode], the application must ** guarantee that the shared cache used by the destination database @@ -9774,7 +10143,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( */ #define SQLITE_CHECKPOINT_PASSIVE 0 /* Do as much as possible w/o blocking */ #define SQLITE_CHECKPOINT_FULL 1 /* Wait for writers, then checkpoint */ -#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for for readers */ +#define SQLITE_CHECKPOINT_RESTART 2 /* Like FULL but wait for readers */ #define SQLITE_CHECKPOINT_TRUNCATE 3 /* Like RESTART but also truncate WAL */ /* @@ -9842,7 +10211,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_DIRECTONLY]]
    SQLITE_VTAB_DIRECTONLY
    **
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** prohibit that virtual table from being used from within triggers and ** views. **
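/*
** Illustrative sketch, not part of the upstream header: an xConnect
** method that marks its (hypothetical) virtual table as direct-only.
** Assumes the sqlite3 header and <string.h> for memset().
*/
static int exampleConnect(
  sqlite3 *db,
  void *pAux,
  int argc, const char *const*argv,
  sqlite3_vtab **ppVtab,
  char **pzErr
){
  sqlite3_vtab *pNew;
  int rc = sqlite3_declare_vtab(db, "CREATE TABLE x(a,b)");
  if( rc!=SQLITE_OK ) return rc;
  /* Prohibit use of this table from within triggers and views */
  rc = sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
  if( rc!=SQLITE_OK ) return rc;
  pNew = (sqlite3_vtab*)sqlite3_malloc(sizeof(*pNew));
  if( pNew==0 ) return SQLITE_NOMEM;
  memset(pNew, 0, sizeof(*pNew));
  *ppVtab = pNew;
  (void)pAux; (void)argc; (void)argv; (void)pzErr;
  return SQLITE_OK;
}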
    @@ -9850,18 +10219,28 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...); ** [[SQLITE_VTAB_INNOCUOUS]]
    SQLITE_VTAB_INNOCUOUS
    **
Calls of the form ** [sqlite3_vtab_config](db,SQLITE_VTAB_INNOCUOUS) from within the -** the [xConnect] or [xCreate] methods of a [virtual table] implmentation +** [xConnect] or [xCreate] methods of a [virtual table] implementation ** identify that virtual table as being safe to use from within triggers ** and views. Conceptually, the SQLITE_VTAB_INNOCUOUS tag means that the ** virtual table can do no serious harm even if it is controlled by a ** malicious hacker. Developers should avoid setting the SQLITE_VTAB_INNOCUOUS ** flag unless absolutely necessary. **
    +** +** [[SQLITE_VTAB_USES_ALL_SCHEMAS]]
    SQLITE_VTAB_USES_ALL_SCHEMAS
    +**
Calls of the form +** [sqlite3_vtab_config](db,SQLITE_VTAB_USES_ALL_SCHEMAS) from within the +** [xConnect] or [xCreate] methods of a [virtual table] implementation +** instruct the query planner to begin at least a read transaction on +** all schemas ("main", "temp", and any ATTACH-ed databases) whenever the +** virtual table is used. +**
    ** */ #define SQLITE_VTAB_CONSTRAINT_SUPPORT 1 #define SQLITE_VTAB_INNOCUOUS 2 #define SQLITE_VTAB_DIRECTONLY 3 +#define SQLITE_VTAB_USES_ALL_SCHEMAS 4 /* ** CAPI3REF: Determine The Virtual Table Conflict Policy @@ -9934,7 +10313,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*); **
• Otherwise, "BINARY" is returned. ** */ -SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int); +SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int); /* ** CAPI3REF: Determine if a virtual table query is DISTINCT @@ -10022,7 +10401,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*); ** communicated to the xBestIndex method as a ** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use ** this constraint, it must set the corresponding -** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under +** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under ** the usual mode of handling IN operators, SQLite generates [bytecode] ** that invokes the [xFilter|xFilter() method] once for each value ** on the right-hand side of the IN operator.)^ Thus the virtual table @@ -10091,21 +10470,20 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle); ** is undefined and probably harmful. ** ** The X parameter in a call to sqlite3_vtab_in_first(X,P) or -** sqlite3_vtab_in_next(X,P) must be one of the parameters to the +** sqlite3_vtab_in_next(X,P) should be one of the parameters to the ** xFilter method which invokes these routines, and specifically ** a parameter that was previously selected for all-at-once IN constraint ** processing using the [sqlite3_vtab_in()] interface in the ** [xBestIndex|xBestIndex method]. ^(If the X parameter is not ** an xFilter argument that was selected for all-at-once IN constraint -** processing, then these routines return [SQLITE_MISUSE])^ or perhaps -** exhibit some other undefined or harmful behavior. +** processing, then these routines return [SQLITE_ERROR].)^ ** ** ^(Use these routines to access all values on the right-hand side ** of the IN constraint using code like the following: ** **

     **    for(rc=sqlite3_vtab_in_first(pList, &pVal);
    -**        rc==SQLITE_OK && pVal
    +**        rc==SQLITE_OK && pVal;
     **        rc=sqlite3_vtab_in_next(pList, &pVal)
     **    ){
     **      // do something with pVal
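/*
** A fuller, self-contained sketch of the same loop (illustrative only,
** not part of the upstream header). pList is assumed to be the xFilter
** argument that was selected for all-at-once IN constraint processing
** via sqlite3_vtab_in() in xBestIndex.
*/
static int exampleFilterInList(sqlite3_value *pList){
  sqlite3_value *pVal;
  int rc;
  for(rc=sqlite3_vtab_in_first(pList, &pVal);
      rc==SQLITE_OK && pVal;
      rc=sqlite3_vtab_in_next(pList, &pVal)
  ){
    /* Process one right-hand-side value of the IN constraint */
    const unsigned char *z = sqlite3_value_text(pVal);
    (void)z;
  }
  /* On normal completion the loop exits with rc==SQLITE_OK and pVal==0;
  ** any other rc is an error code from the iteration routines. */
  return rc;
}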
    @@ -10203,6 +10581,10 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
     ** managed by the prepared statement S and will be automatically freed when
     ** S is finalized.
     **
    +** Not all values are available for all query elements. When a value is
    +** not available, the output variable is set to -1 if the value is numeric,
    +** or to NULL if it is a string (SQLITE_SCANSTAT_NAME).
    +**
     ** 
    ** [[SQLITE_SCANSTAT_NLOOP]]
    SQLITE_SCANSTAT_NLOOP
    **
    ^The [sqlite3_int64] variable pointed to by the V parameter will be @@ -10230,12 +10612,24 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN] ** description for the X-th loop. ** -** [[SQLITE_SCANSTAT_SELECTID]]
    SQLITE_SCANSTAT_SELECT
    +** [[SQLITE_SCANSTAT_SELECTID]]
    SQLITE_SCANSTAT_SELECTID
    **
    ^The "int" variable pointed to by the V parameter will be set to the -** "select-id" for the X-th loop. The select-id identifies which query or -** subquery the loop is part of. The main query has a select-id of zero. -** The select-id is the same value as is output in the first column -** of an [EXPLAIN QUERY PLAN] query. +** id for the X-th query plan element. The id value is unique within the +** statement. The select-id is the same value as is output in the first +** column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_PARENTID]]
    SQLITE_SCANSTAT_PARENTID
    +**
    The "int" variable pointed to by the V parameter will be set to the +** the id of the parent of the current query element, if applicable, or +** to zero if the query element has no parent. This is the same value as +** returned in the second column of an [EXPLAIN QUERY PLAN] query. +** +** [[SQLITE_SCANSTAT_NCYCLE]]
    SQLITE_SCANSTAT_NCYCLE
    +**
    The sqlite3_int64 output value is set to the number of cycles, +** according to the processor time-stamp counter, that elapsed while the +** query element was being processed. This value is not available for +** all query elements - if it is unavailable the output variable is +** set to -1. **
*/ #define SQLITE_SCANSTAT_NLOOP 0 @@ -10244,12 +10638,14 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** #define SQLITE_SCANSTAT_NAME 3 #define SQLITE_SCANSTAT_EXPLAIN 4 #define SQLITE_SCANSTAT_SELECTID 5 +#define SQLITE_SCANSTAT_PARENTID 6 +#define SQLITE_SCANSTAT_NCYCLE 7 /* ** CAPI3REF: Prepared Statement Scan Status ** METHOD: sqlite3_stmt ** -** This interface returns information about the predicted and measured +** These interfaces return information about the predicted and measured ** performance for pStmt. Advanced applications can use this ** interface to compare the predicted and the measured performance and ** issue warnings and/or rerun [ANALYZE] if discrepancies are found. @@ -10260,19 +10656,25 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value ** ** ** The "iScanStatusOp" parameter determines which status information to return. ** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior -** of this interface is undefined. -** ^The requested measurement is written into a variable pointed to by -** the "pOut" parameter. -** Parameter "idx" identifies the specific loop to retrieve statistics for. -** Loops are numbered starting from zero. ^If idx is out of range - less than -** zero or greater than or equal to the total number of loops used to implement -** the statement - a non-zero value is returned and the variable that pOut -** points to is unchanged. -** -** ^Statistics might not be available for all loops in all statements. ^In cases -** where there exist loops with no available statistics, this function behaves -** as if the loop did not exist - it returns non-zero and leave the variable -** that pOut points to unchanged. +** of this interface is undefined. ^The requested measurement is written into +** a variable pointed to by the "pOut" parameter. +** +** The "flags" parameter must be passed a mask of flags. At present only +** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX +** is specified, then status information is available for all elements +** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If +** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements +** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of +** the EXPLAIN QUERY PLAN output) are available. Invoking API +** sqlite3_stmt_scanstatus() is equivalent to calling +** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter. +** +** Parameter "idx" identifies the specific query element to retrieve statistics +** for. Query elements are numbered starting from zero. A value of -1 may be +** passed to query for statistics regarding the entire query. ^If idx is out of range +** - less than -1 or greater than or equal to the total number of query +** elements used to implement the statement - a non-zero value is returned and +** the variable that pOut points to is unchanged. ** ** See also: [sqlite3_stmt_scanstatus_reset()] */ @@ -10282,6 +10684,19 @@ SQLITE_API int sqlite3_stmt_scanstatus( int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */ void *pOut /* Result written here */ ); +SQLITE_API int sqlite3_stmt_scanstatus_v2( + sqlite3_stmt *pStmt, /* Prepared statement for which info desired */ + int idx, /* Index of loop to report on */ + int iScanStatusOp, /* Information desired. 
SQLITE_SCANSTAT_* */ + int flags, /* Mask of flags defined below */ + void *pOut /* Result written here */ +); + +/* +** CAPI3REF: Prepared Statement Scan Status +** KEYWORDS: {scan status flags} +*/ +#define SQLITE_SCANSTAT_COMPLEX 0x0001 /* ** CAPI3REF: Zero Scan-Status Counters @@ -10372,6 +10787,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** function is not defined for operations on WITHOUT ROWID tables, or for ** DELETE operations on rowid tables. ** +** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from +** the previous call on the same [database connection] D, or NULL for +** the first call on D. +** ** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()], ** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces ** provide additional information about a preupdate event. These routines @@ -10411,7 +10830,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*); ** When the [sqlite3_blob_write()] API is used to update a blob column, ** the pre-update hook is invoked with SQLITE_DELETE. This is because ** in this case the new values are not available. In this case, when a -** callback made with op==SQLITE_DELETE is actuall a write using the +** callback made with op==SQLITE_DELETE is actually a write using the ** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns ** the index of the column being written. In other cases, where the ** pre-update hook is being invoked for some other reason, including a @@ -10672,6 +11091,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c ** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy ** of the database exists. ** +** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set, +** the returned buffer content will remain accessible and unchanged +** until either the next write operation on the connection or when +** the connection is closed, and applications must not modify the +** buffer. If the bit had been clear, the returned buffer will not +** be accessed by SQLite after the call. +** ** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the ** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory ** allocation error occurs. @@ -10720,6 +11146,9 @@ SQLITE_API unsigned char *sqlite3_serialize( ** SQLite will try to increase the buffer size using sqlite3_realloc64() ** if writes on the database cause it to grow larger than M bytes. ** +** Applications must not modify the buffer P or invalidate it before +** the database connection D is closed. +** ** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the ** database is currently in a read transaction or is involved in a backup ** operation. ** @@ -10728,6 +11157,13 @@ SQLITE_API unsigned char *sqlite3_serialize( ** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the ** function returns SQLITE_ERROR. ** +** The deserialized database should not be in [WAL mode]. If the database +** is in WAL mode, then any attempt to use the database file will result +** in an [SQLITE_CANTOPEN] error. The application can set the +** [file format version numbers] (bytes 18 and 19) of the input database P +** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the +** database file into rollback mode and work around this limitation. +** ** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the ** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then ** [sqlite3_free()] is invoked on argument P prior to returning.
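/*
** Illustrative sketch, not part of the upstream header: copying one open
** database into a second connection with sqlite3_serialize() and
** sqlite3_deserialize(). Error handling is abbreviated and the function
** name is hypothetical.
*/
static int exampleCopyDatabase(sqlite3 *pFrom, sqlite3 *pTo){
  sqlite3_int64 szDb = 0;
  unsigned char *pData = sqlite3_serialize(pFrom, "main", &szDb, 0);
  if( pData==0 ) return SQLITE_NOMEM;  /* OOM, or no such database */
  /* Hand the buffer to pTo. FREEONCLOSE means SQLite will eventually
  ** call sqlite3_free() on pData; RESIZEABLE lets the image grow. */
  return sqlite3_deserialize(pTo, "main", pData, szDb, szDb,
      SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
}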
@@ -10777,6 +11213,19 @@ SQLITE_API int sqlite3_deserialize( # undef double #endif +#if defined(__wasi__) +# undef SQLITE_WASI +# define SQLITE_WASI 1 +# undef SQLITE_OMIT_WAL +# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */ +# ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION +# endif +# ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 0 +# endif +#endif + #if 0 } /* End of the 'extern "C"' block */ #endif @@ -10983,16 +11432,20 @@ SQLITE_API int sqlite3session_create( SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); /* -** CAPIREF: Conigure a Session Object +** CAPI3REF: Configure a Session Object ** METHOD: sqlite3_session ** ** This method is used to configure a session object after it has been -** created. At present the only valid value for the second parameter is -** [SQLITE_SESSION_OBJCONFIG_SIZE]. +** created. At present the only valid values for the second parameter are +** [SQLITE_SESSION_OBJCONFIG_SIZE] and [SQLITE_SESSION_OBJCONFIG_ROWID]. ** -** Arguments for sqlite3session_object_config() +*/ +SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); + +/* +** CAPI3REF: Options for sqlite3session_object_config ** -** The following values may passed as the the 4th parameter to +** The following values may be passed as the 2nd parameter to ** sqlite3session_object_config(). ** **
    SQLITE_SESSION_OBJCONFIG_SIZE
    @@ -11008,12 +11461,21 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession); ** ** It is an error (SQLITE_MISUSE) to attempt to modify this setting after ** the first table has been attached to the session object. +** +**
    SQLITE_SESSION_OBJCONFIG_ROWID
    +** This option is used to set, clear or query the flag that enables +** collection of data for tables with no explicit PRIMARY KEY. +** +** Normally, tables with no explicit PRIMARY KEY are simply ignored +** by the sessions module. However, if this flag is set, it behaves +** as if such tables have a column "_rowid_ INTEGER PRIMARY KEY" inserted +** as their leftmost columns. +** +** It is an error (SQLITE_MISUSE) to attempt to modify this setting after +** the first table has been attached to the session object. */ -SQLITE_API int sqlite3session_object_config(sqlite3_session*, int op, void *pArg); - -/* -*/ -#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_SIZE 1 +#define SQLITE_SESSION_OBJCONFIG_ROWID 2 /* ** CAPI3REF: Enable Or Disable A Session Object @@ -11774,6 +12236,18 @@ SQLITE_API int sqlite3changeset_concat( ); +/* +** CAPI3REF: Upgrade the Schema of a Changeset/Patchset +*/ +SQLITE_API int sqlite3changeset_upgrade( + sqlite3 *db, + const char *zDb, + int nIn, const void *pIn, /* Input changeset */ + int *pnOut, void **ppOut /* OUT: Inverse of input */ +); + + + /* ** CAPI3REF: Changegroup Handle ** @@ -11820,6 +12294,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup; */ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); +/* +** CAPI3REF: Add a Schema to a Changegroup +** METHOD: sqlite3_changegroup_schema +** +** This method may be used to optionally enforce the rule that the changesets +** added to the changegroup handle must match the schema of database zDb +** ("main", "temp", or the name of an attached database). If +** sqlite3changegroup_add() is called to add a changeset that is not compatible +** with the configured schema, SQLITE_SCHEMA is returned and the changegroup +** object is left in an undefined state. +** +** A changeset schema is considered compatible with the database schema in +** the same way as for sqlite3changeset_apply(). Specifically, for each +** table in the changeset, there exists a database table with: +** +**
      +**
    • The name identified by the changeset, and +**
    • at least as many columns as recorded in the changeset, and +**
    • the primary key columns in the same position as recorded in +** the changeset. +**
+** +** The output of the changegroup object always has the same schema as the +** database nominated using this function. In cases where changesets passed +** to sqlite3changegroup_add() have fewer columns than the corresponding table +** in the database schema, these are filled in using the default column +** values from the database schema. This makes it possible to combine +** changesets that have different numbers of columns for a single table +** within a changegroup, provided that they are otherwise compatible. +*/ +SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb); + /* ** CAPI3REF: Add A Changeset To A Changegroup ** METHOD: sqlite3_changegroup @@ -11888,13 +12394,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp); ** If the new changeset contains changes to a table that is already present ** in the changegroup, then the number of columns and the position of the ** primary key columns for the table must be consistent. If this is not the -** case, this function fails with SQLITE_SCHEMA. If the input changeset -** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is -** returned. Or, if an out-of-memory condition occurs during processing, this -** function returns SQLITE_NOMEM. In all cases, if an error occurs the state -** of the final contents of the changegroup is undefined. +** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup +** object has been configured with a database schema using the +** sqlite3changegroup_schema() API, then it is possible to combine changesets +** with different numbers of columns for a single table, provided that +** they are otherwise compatible. +** +** If the input changeset appears to be corrupt and the corruption is +** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition +** occurs during processing, this function returns SQLITE_NOMEM. ** -** If no error occurs, SQLITE_OK is returned. +** In all cases, if an error occurs the state of the final contents of the +** changegroup is undefined. If no error occurs, SQLITE_OK is returned. */ SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData); @@ -12146,9 +12657,30 @@ SQLITE_API int sqlite3changeset_apply_v2( ** Invert the changeset before applying it. This is equivalent to inverting ** a changeset using sqlite3changeset_invert() before applying it. It is ** an error to specify this flag with a patchset. +** +**
    SQLITE_CHANGESETAPPLY_IGNORENOOP
    +** Do not invoke the conflict handler callback for any changes that +** would not actually modify the database even if they were applied. +** Specifically, this means that the conflict handler is not invoked +** for: +**
      +**
    • a delete change if the row being deleted cannot be found, +**
    • an update change if the modified fields are already set to +** their new values in the conflicting row, or +**
    • an insert change if all fields of the conflicting row match +** the row being inserted. +**
    +** +**
    SQLITE_CHANGESETAPPLY_FKNOACTION
+** If this flag is set, then all foreign key constraints in the target +** database behave as if they were declared with "ON UPDATE NO ACTION ON +** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL +** or SET DEFAULT. */ #define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001 #define SQLITE_CHANGESETAPPLY_INVERT 0x0002 +#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004 +#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008 /* ** CAPI3REF: Constants Passed To The Conflict Handler @@ -12714,8 +13246,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -12725,8 +13260,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iPhrase is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within ** @@ -12742,12 +13279,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -12773,6 +13311,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK.
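/*
** Illustrative sketch, not part of the upstream header: applying a
** changeset with the two SQLITE_CHANGESETAPPLY_* flags defined above.
** The conflict handler and function names are hypothetical.
*/
static int exampleConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *p){
  (void)pCtx; (void)eConflict; (void)p;
  return SQLITE_CHANGESET_OMIT;  /* skip any conflicting change */
}
static int exampleApply(sqlite3 *db, int nChangeset, void *pChangeset){
  return sqlite3changeset_apply_v2(db, nChangeset, pChangeset,
      0,                  /* xFilter: NULL means apply to all tables */
      exampleConflict, 0, /* xConflict and its context pointer */
      0, 0,               /* ppRebase, pnRebase: rebasing not used */
      SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION);
}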
@@ -12887,6 +13429,39 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { int iVersion; /* Currently always set to 3 */ @@ -12924,6 +13499,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -13118,8 +13700,8 @@ struct Fts5ExtensionApi { ** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. 
*/ typedef struct Fts5Tokenizer Fts5Tokenizer; @@ -13167,7 +13749,7 @@ struct fts5_api { int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) ); @@ -13176,7 +13758,7 @@ struct fts5_api { int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer ); @@ -13184,7 +13766,7 @@ struct fts5_api { int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) ); @@ -13215,7 +13797,7 @@ struct fts5_api { ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -#include "config.h" +#include "sqlite_cfg.h" #define SQLITECONFIG_H 1 #endif @@ -13295,7 +13877,7 @@ struct fts5_api { ** level of recursion for each term. A stack overflow can result ** if the number of terms is too large. In practice, most SQL ** never has more than 3 or 4 terms. Use a value of 0 to disable -** any limit on the number of terms in a compount SELECT. +** any limit on the number of terms in a compound SELECT. */ #ifndef SQLITE_MAX_COMPOUND_SELECT # define SQLITE_MAX_COMPOUND_SELECT 500 @@ -13410,7 +13992,7 @@ struct fts5_api { ** max_page_count macro. */ #ifndef SQLITE_MAX_PAGE_COUNT -# define SQLITE_MAX_PAGE_COUNT 1073741823 +# define SQLITE_MAX_PAGE_COUNT 0xfffffffe /* 4294967294 */ #endif /* @@ -13445,8 +14027,8 @@ struct fts5_api { #endif /* -** WAL mode depends on atomic aligned 32-bit loads and stores in a few -** places. The following macros try to make this explicit. +** A few places in the code require atomic load/store of aligned +** integer values. */ #ifndef __has_extension # define __has_extension(x) 0 /* compatibility with non-clang compilers */ @@ -13502,15 +14084,22 @@ struct fts5_api { #endif /* -** A macro to hint to the compiler that a function should not be +** Macros to hint to the compiler that a function should or should not be ** inlined. */ #if defined(__GNUC__) # define SQLITE_NOINLINE __attribute__((noinline)) +# define SQLITE_INLINE __attribute__((always_inline)) inline #elif defined(_MSC_VER) && _MSC_VER>=1310 # define SQLITE_NOINLINE __declspec(noinline) +# define SQLITE_INLINE __forceinline #else # define SQLITE_NOINLINE +# define SQLITE_INLINE +#endif +#if defined(SQLITE_COVERAGE_TEST) || defined(__STRICT_ANSI__) +# undef SQLITE_INLINE +# define SQLITE_INLINE #endif /* @@ -13532,6 +14121,29 @@ struct fts5_api { # endif #endif +/* +** Enable SQLITE_USE_SEH by default on MSVC builds. Only omit +** SEH support if the -DSQLITE_OMIT_SEH option is given. +*/ +#if defined(_MSC_VER) && !defined(SQLITE_OMIT_SEH) +# define SQLITE_USE_SEH 1 +#else +# undef SQLITE_USE_SEH +#endif + +/* +** Enable SQLITE_DIRECT_OVERFLOW_READ, unless the build explicitly +** disables it using -DSQLITE_DIRECT_OVERFLOW_READ=0 +*/ +#if defined(SQLITE_DIRECT_OVERFLOW_READ) && SQLITE_DIRECT_OVERFLOW_READ+1==1 + /* Disable if -DSQLITE_DIRECT_OVERFLOW_READ=0 */ +# undef SQLITE_DIRECT_OVERFLOW_READ +#else + /* In all other cases, enable */ +# define SQLITE_DIRECT_OVERFLOW_READ 1 +#endif + + /* ** The SQLITE_THREADSAFE macro must be defined as 0, 1, or 2. ** 0 means mutexes are permanently disable and the library is never @@ -14328,15 +14940,9 @@ typedef INT8_TYPE i8; /* 1-byte signed integer */ /* ** The datatype used to store estimates of the number of rows in a -** table or index. This is an unsigned integer type. 
For 99.9% of -** the world, a 32-bit integer is sufficient. But a 64-bit integer -** can be used at compile-time if desired. +** table or index. */ -#ifdef SQLITE_64BIT_STATS - typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */ -#else - typedef u32 tRowcnt; /* 32-bit is the default */ -#endif +typedef u64 tRowcnt; /* ** Estimated quantities used for query planning are stored as 16-bit @@ -14397,8 +15003,31 @@ typedef INT16_TYPE LogEst; ** the end of buffer S. This macro returns true if P points to something ** contained within the buffer S. */ -#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E))) +#define SQLITE_WITHIN(P,S,E) (((uptr)(P)>=(uptr)(S))&&((uptr)(P)<(uptr)(E))) +/* +** P is one byte past the end of a large buffer. Return true if a span of bytes +** between S..E crosses the end of that buffer. In other words, return true +** if the sub-buffer S..E-1 overflows the buffer whose last byte is P-1. +** +** S is the start of the span. E is one byte past the end of end of span. +** +** P +** |-----------------| FALSE +** |-------| +** S E +** +** P +** |-----------------| +** |-------| TRUE +** S E +** +** P +** |-----------------| +** |-------| FALSE +** S E +*/ +#define SQLITE_OVERFLOW(P,S,E) (((uptr)(S)<(uptr)(P))&&((uptr)(E)>(uptr)(P))) /* ** Macros to determine whether the machine is big or little endian, @@ -14408,16 +15037,33 @@ typedef INT16_TYPE LogEst; ** using C-preprocessor macros. If that is unsuccessful, or if ** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined ** at run-time. +** +** If you are building SQLite on some obscure platform for which the +** following ifdef magic does not work, you can always include either: +** +** -DSQLITE_BYTEORDER=1234 +** +** or +** +** -DSQLITE_BYTEORDER=4321 +** +** to cause the build to work for little-endian or big-endian processors, +** respectively. */ -#ifndef SQLITE_BYTEORDER -# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ +#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */ +# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define SQLITE_BYTEORDER 4321 +# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define SQLITE_BYTEORDER 1234 +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 +# define SQLITE_BYTEORDER 4321 +# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) -# define SQLITE_BYTEORDER 1234 -# elif defined(sparc) || defined(__ppc__) || \ - defined(__ARMEB__) || defined(__AARCH64EB__) -# define SQLITE_BYTEORDER 4321 +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 # else # define SQLITE_BYTEORDER 0 # endif @@ -14482,9 +15128,9 @@ typedef INT16_TYPE LogEst; ** pointers. In that case, only verify 4-byte alignment. 
*/ #ifdef SQLITE_4_BYTE_ALIGNED_MALLOC -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0) #else -# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0) +# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0) #endif /* @@ -14538,15 +15184,39 @@ SQLITE_PRIVATE u32 sqlite3TreeTrace; && (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE) \ || defined(SQLITE_ENABLE_TREETRACE)) # define TREETRACE_ENABLED 1 -# define SELECTTRACE(K,P,S,X) \ +# define TREETRACE(K,P,S,X) \ if(sqlite3TreeTrace&(K)) \ sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\ sqlite3DebugPrintf X #else -# define SELECTTRACE(K,P,S,X) +# define TREETRACE(K,P,S,X) # define TREETRACE_ENABLED 0 #endif +/* TREETRACE flag meanings: +** +** 0x00000001 Beginning and end of SELECT processing +** 0x00000002 WHERE clause processing +** 0x00000004 Query flattener +** 0x00000008 Result-set wildcard expansion +** 0x00000010 Query name resolution +** 0x00000020 Aggregate analysis +** 0x00000040 Window functions +** 0x00000080 Generated column names +** 0x00000100 Move HAVING terms into WHERE +** 0x00000200 Count-of-view optimization +** 0x00000400 Compound SELECT processing +** 0x00000800 Drop superfluous ORDER BY +** 0x00001000 LEFT JOIN simplifies to JOIN +** 0x00002000 Constant propagation +** 0x00004000 Push-down optimization +** 0x00008000 After all FROM-clause analysis +** 0x00010000 Beginning of DELETE/INSERT/UPDATE processing +** 0x00020000 Transform DISTINCT into GROUP BY +** 0x00040000 SELECT tree dump after all code has been generated +** 0x00080000 NOT NULL strength reduction +*/ + /* ** Macros for "wheretrace" */ @@ -14559,6 +15229,36 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace; # define WHERETRACE(K,X) #endif +/* +** Bits for the sqlite3WhereTrace mask: +** +** (---any--) Top-level block structure +** 0x-------F High-level debug messages +** 0x----FFF- More detail +** 0xFFFF---- Low-level debug messages +** +** 0x00000001 Code generation +** 0x00000002 Solver +** 0x00000004 Solver costs +** 0x00000008 WhereLoop inserts +** +** 0x00000010 Display sqlite3_index_info xBestIndex calls +** 0x00000020 Range an equality scan metrics +** 0x00000040 IN operator decisions +** 0x00000080 WhereLoop cost adjustements +** 0x00000100 +** 0x00000200 Covering index decisions +** 0x00000400 OR optimization +** 0x00000800 Index scanner +** 0x00001000 More details associated with code generation +** 0x00002000 +** 0x00004000 Show all WHERE terms at key points +** 0x00008000 Show the full SELECT statement at key places +** +** 0x00010000 Show more detail when printing WHERE terms +** 0x00020000 Show WHERE terms returned from whereScanNext() +*/ + /* ** An instance of the following structure is used to store the busy-handler @@ -14579,7 +15279,7 @@ struct BusyHandler { /* ** Name of table that holds the database schema. ** -** The PREFERRED names are used whereever possible. But LEGACY is also +** The PREFERRED names are used wherever possible. But LEGACY is also ** used for backwards compatibility. ** ** 1. 
Queries can use either the PREFERRED or the LEGACY names @@ -14688,16 +15388,19 @@ typedef struct Column Column; typedef struct Cte Cte; typedef struct CteUse CteUse; typedef struct Db Db; +typedef struct DbClientData DbClientData; typedef struct DbFixer DbFixer; typedef struct Schema Schema; typedef struct Expr Expr; typedef struct ExprList ExprList; typedef struct FKey FKey; +typedef struct FpDecode FpDecode; typedef struct FuncDestructor FuncDestructor; typedef struct FuncDef FuncDef; typedef struct FuncDefHash FuncDefHash; typedef struct IdList IdList; typedef struct Index Index; +typedef struct IndexedExpr IndexedExpr; typedef struct IndexSample IndexSample; typedef struct KeyClass KeyClass; typedef struct KeyInfo KeyInfo; @@ -14710,6 +15413,7 @@ typedef struct Parse Parse; typedef struct ParseCleanup ParseCleanup; typedef struct PreUpdate PreUpdate; typedef struct PrintfArguments PrintfArguments; +typedef struct RCStr RCStr; typedef struct RenameToken RenameToken; typedef struct Returning Returning; typedef struct RowSet RowSet; @@ -14763,6 +15467,7 @@ typedef struct With With; #define MASKBIT32(n) (((unsigned int)1)<<(n)) #define SMASKBIT32(n) ((n)<=31?((unsigned int)1)<<(n):0) #define ALLBITS ((Bitmask)-1) +#define TOPBIT (((Bitmask)1)<<(BMS-1)) /* A VList object records a mapping between parameters/variables/wildcards ** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer @@ -14777,6 +15482,331 @@ typedef int VList; ** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque ** pointer types (i.e. FuncDef) defined above. */ +/************** Include os.h in the middle of sqliteInt.h ********************/ +/************** Begin file os.h **********************************************/ +/* +** 2001 September 16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This header file (together with is companion C source-code file +** "os.c") attempt to abstract the underlying operating system so that +** the SQLite library will work on both POSIX and windows systems. +** +** This header file is #include-ed by sqliteInt.h and thus ends up +** being included by every source file. +*/ +#ifndef _SQLITE_OS_H_ +#define _SQLITE_OS_H_ + +/* +** Attempt to automatically detect the operating system and setup the +** necessary pre-processor macros for it. +*/ +/************** Include os_setup.h in the middle of os.h *********************/ +/************** Begin file os_setup.h ****************************************/ +/* +** 2013 November 25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains pre-processor directives related to operating system +** detection and/or setup. +*/ +#ifndef SQLITE_OS_SETUP_H +#define SQLITE_OS_SETUP_H + +/* +** Figure out if we are dealing with Unix, Windows, or some other operating +** system. 
+**
+** After the following block of preprocessor macros, all of
+**
+** SQLITE_OS_KV
+** SQLITE_OS_OTHER
+** SQLITE_OS_UNIX
+** SQLITE_OS_WIN
+**
+** will be defined to either 1 or 0. One of them will be 1. The others will be 0.
+** If none of the macros are initially defined, then select either
+** SQLITE_OS_UNIX or SQLITE_OS_WIN depending on the target platform.
+**
+** If SQLITE_OS_OTHER=1 is specified at compile-time, then the application
+** must provide its own VFS implementation together with sqlite3_os_init()
+** and sqlite3_os_end() routines.
+*/
+#if !defined(SQLITE_OS_KV) && !defined(SQLITE_OS_OTHER) && \
+ !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_WIN)
+# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \
+ defined(__MINGW32__) || defined(__BORLANDC__)
+# define SQLITE_OS_WIN 1
+# define SQLITE_OS_UNIX 0
+# else
+# define SQLITE_OS_WIN 0
+# define SQLITE_OS_UNIX 1
+# endif
+#endif
+#if SQLITE_OS_OTHER+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+#endif
+#if SQLITE_OS_KV+1>1
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+# define SQLITE_OMIT_LOAD_EXTENSION 1
+# define SQLITE_OMIT_WAL 1
+# define SQLITE_OMIT_DEPRECATED 1
+# undef SQLITE_TEMP_STORE
+# define SQLITE_TEMP_STORE 3 /* Always use memory for temporary storage */
+# define SQLITE_DQS 0
+# define SQLITE_OMIT_SHARED_CACHE 1
+# define SQLITE_OMIT_AUTOINIT 1
+#endif
+#if SQLITE_OS_UNIX+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_WIN
+# define SQLITE_OS_WIN 0
+#endif
+#if SQLITE_OS_WIN+1>1
+# undef SQLITE_OS_KV
+# define SQLITE_OS_KV 0
+# undef SQLITE_OS_OTHER
+# define SQLITE_OS_OTHER 0
+# undef SQLITE_OS_UNIX
+# define SQLITE_OS_UNIX 0
+#endif
+
+
+#endif /* SQLITE_OS_SETUP_H */
+
+/************** End of os_setup.h ********************************************/
+/************** Continuing where we left off in os.h *************************/
+
+/* If the SET_FULLSYNC macro is not defined above, then make it
+** a no-op
+*/
+#ifndef SET_FULLSYNC
+# define SET_FULLSYNC(x,y)
+#endif
+
+/* Maximum pathname length. Note: FILENAME_MAX defined by stdio.h
+*/
+#ifndef SQLITE_MAX_PATHLEN
+# define SQLITE_MAX_PATHLEN FILENAME_MAX
+#endif
+
+/* Maximum number of symlinks that will be resolved while trying to
+** expand a filename in xFullPathname() in the VFS.
+*/
+#ifndef SQLITE_MAX_SYMLINK
+# define SQLITE_MAX_SYMLINK 200
+#endif
+
+/*
+** The default size of a disk sector
+*/
+#ifndef SQLITE_DEFAULT_SECTOR_SIZE
+# define SQLITE_DEFAULT_SECTOR_SIZE 4096
+#endif
+
+/*
+** Temporary files are named starting with this prefix followed by 16 random
+** alphanumeric characters, and no file extension. They are stored in the
+** OS's standard temporary file directory, and are deleted prior to exit.
+** If sqlite is being embedded in another program, you may wish to change the
+** prefix to reflect your program's name, so that if your program exits
+** prematurely, old temporary files can be easily identified. This can be done
+** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line.
+**
+** 2006-10-31: The default prefix used to be "sqlite_". But then
+** McAfee started using SQLite in their anti-virus product and it
+** started putting files with the "sqlite" name in the c:/temp folder.
+** This annoyed many windows users. Those users would then do a +** Google search for "sqlite", find the telephone numbers of the +** developers and call to wake them up at night and complain. +** For this reason, the default name prefix is changed to be "sqlite" +** spelled backwards. So the temp files are still identified, but +** anybody smart enough to figure out the code is also likely smart +** enough to know that calling the developer will not help get rid +** of the file. +*/ +#ifndef SQLITE_TEMP_FILE_PREFIX +# define SQLITE_TEMP_FILE_PREFIX "etilqs_" +#endif + +/* +** The following values may be passed as the second argument to +** sqlite3OsLock(). The various locks exhibit the following semantics: +** +** SHARED: Any number of processes may hold a SHARED lock simultaneously. +** RESERVED: A single process may hold a RESERVED lock on a file at +** any time. Other processes may hold and obtain new SHARED locks. +** PENDING: A single process may hold a PENDING lock on a file at +** any one time. Existing SHARED locks may persist, but no new +** SHARED locks may be obtained by other processes. +** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. +** +** PENDING_LOCK may not be passed directly to sqlite3OsLock(). Instead, a +** process that requests an EXCLUSIVE lock may actually obtain a PENDING +** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to +** sqlite3OsLock(). +*/ +#define NO_LOCK 0 +#define SHARED_LOCK 1 +#define RESERVED_LOCK 2 +#define PENDING_LOCK 3 +#define EXCLUSIVE_LOCK 4 + +/* +** File Locking Notes: (Mostly about windows but also some info for Unix) +** +** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because +** those functions are not available. So we use only LockFile() and +** UnlockFile(). +** +** LockFile() prevents not just writing but also reading by other processes. +** A SHARED_LOCK is obtained by locking a single randomly-chosen +** byte out of a specific range of bytes. The lock byte is obtained at +** random so two separate readers can probably access the file at the +** same time, unless they are unlucky and choose the same lock byte. +** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. +** There can only be one writer. A RESERVED_LOCK is obtained by locking +** a single byte of the file that is designated as the reserved lock byte. +** A PENDING_LOCK is obtained by locking a designated byte different from +** the RESERVED_LOCK byte. +** +** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, +** which means we can use reader/writer locks. When reader/writer locks +** are used, the lock is placed on the same range of bytes that is used +** for probabilistic locking in Win95/98/ME. Hence, the locking scheme +** will support two or more Win95 readers or two or more WinNT readers. +** But a single Win95 reader will lock out all WinNT readers and a single +** WinNT reader will lock out all other Win95 readers. +** +** The following #defines specify the range of bytes used for locking. +** SHARED_SIZE is the number of bytes available in the pool from which +** a random byte is selected for a shared lock. The pool of bytes for +** shared locks begins at SHARED_FIRST. +** +** The same locking strategy and +** byte ranges are used for Unix. This leaves open the possibility of having +** clients on win95, winNT, and unix all talking to the same shared file +** and all locking correctly. 
To do so would require that samba (or whatever
+** tool is being used for file sharing) implements locks correctly between
+** windows and unix. I'm guessing that isn't likely to happen, but by
+** using the same locking range we are at least open to the possibility.
+**
+** Locking in windows is mandatory. For this reason, we cannot store
+** actual data in the bytes used for locking. The pager therefore never
+** allocates the pages involved in locking. SHARED_SIZE is selected so
+** that all locks will fit on a single page even at the minimum page size.
+** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE
+** is set high so that we don't have to allocate an unused page except
+** for very large databases. But one should test the page skipping logic
+** by setting PENDING_BYTE low and running the entire regression suite.
+**
+** Changing the value of PENDING_BYTE results in a subtly incompatible
+** file format. Depending on how it is changed, you might not notice
+** the incompatibility right away, even running a full regression test.
+** The default location of PENDING_BYTE is the first byte past the
+** 1GB boundary.
+**
+*/
+#ifdef SQLITE_OMIT_WSD
+# define PENDING_BYTE (0x40000000)
+#else
+# define PENDING_BYTE sqlite3PendingByte
+#endif
+#define RESERVED_BYTE (PENDING_BYTE+1)
+#define SHARED_FIRST (PENDING_BYTE+2)
+#define SHARED_SIZE 510
+
+/*
+** Wrapper around OS specific sqlite3_os_init() function.
+*/
+SQLITE_PRIVATE int sqlite3OsInit(void);
+
+/*
+** Functions for accessing sqlite3_file methods
+*/
+SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*);
+SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset);
+SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size);
+SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize);
+SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int);
+SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*);
+SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*);
+#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0
+SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id);
+#ifndef SQLITE_OMIT_WAL
+SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **);
+SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int);
+SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id);
+SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int);
+#endif /* SQLITE_OMIT_WAL */
+SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **);
+SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *);
+
+
+/*
+** Functions for accessing sqlite3_vfs methods
+*/
+SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *);
+SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int);
+SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut);
+SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *);
+#ifndef SQLITE_OMIT_LOAD_EXTENSION
+SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *);
+SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *);
+SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *,
void *, const char *))(void); +SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *); +#endif /* SQLITE_OMIT_LOAD_EXTENSION */ +SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *); +SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int); +SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*); +SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); + +/* +** Convenience functions for opening and closing files using +** sqlite3_malloc() to obtain space for the file-handle structure. +*/ +SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); +SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *); + +#endif /* _SQLITE_OS_H_ */ + +/************** End of os.h **************************************************/ +/************** Continuing where we left off in sqliteInt.h ******************/ /************** Include pager.h in the middle of sqliteInt.h *****************/ /************** Begin file pager.h *******************************************/ /* @@ -15002,7 +16032,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager*); SQLITE_PRIVATE const char *sqlite3PagerJournalname(Pager*); SQLITE_PRIVATE void *sqlite3PagerTempSpace(Pager*); SQLITE_PRIVATE int sqlite3PagerIsMemdb(Pager*); -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, int *); +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *, int, int, u64*); SQLITE_PRIVATE void sqlite3PagerClearCache(Pager*); SQLITE_PRIVATE int sqlite3SectorSize(sqlite3_file *); @@ -15032,6 +16062,10 @@ SQLITE_PRIVATE void sqlite3PagerRefdump(Pager*); # define enable_simulated_io_errors() #endif +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) +SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager*); +#endif + #endif /* SQLITE_PAGER_H */ /************** End of pager.h ***********************************************/ @@ -15223,7 +16257,7 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p); ** reduce network bandwidth. ** ** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by -** standard SQLite. The other hints are provided for extentions that use +** standard SQLite. The other hints are provided for extensions that use ** the SQLite parser and code generator but substitute their own storage ** engine. 
*/ @@ -15361,15 +16395,21 @@ SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int flags); SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorPin(BtCursor*); SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor*); -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor*); -#endif SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*); SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt); SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*); SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*); -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(sqlite3*,Btree*,Pgno*aRoot,int nRoot,int,int*); +SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( + sqlite3 *db, /* Database connection that is running the check */ + Btree *p, /* The btree to be checked */ + Pgno *aRoot, /* An array of root pages numbers for individual trees */ + int nRoot, /* Number of entries in aRoot[] */ + int mxErr, /* Stop reporting errors after this many */ + int *pnErr, /* OUT: Write number of errors seen to this variable */ + char **pzOut /* OUT: Write the error message string here */ +); SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*); SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor*); @@ -15408,6 +16448,8 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree*, int, int *, int *); SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor*, BtCursor*, i64); +SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree*); + /* ** If we are not using shared cache, then there is no need to ** use mutexes to access the BtShared structures. So make the @@ -15524,14 +16566,14 @@ struct VdbeOp { #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS char *zComment; /* Comment to improve readability */ #endif -#ifdef VDBE_PROFILE - u32 cnt; /* Number of times this instruction was executed */ - u64 cycles; /* Total time spent executing this instruction */ -#endif #ifdef SQLITE_VDBE_COVERAGE u32 iSrcLine; /* Source-code line that generated this opcode ** with flags in the upper 8 bits */ #endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + u64 nExec; + u64 nCycle; +#endif }; typedef struct VdbeOp VdbeOp; @@ -15583,6 +16625,7 @@ typedef struct VdbeOpList VdbeOpList; #define P4_INT64 (-13) /* P4 is a 64-bit signed integer */ #define P4_INTARRAY (-14) /* P4 is a vector of 32-bit integers */ #define P4_FUNCCTX (-15) /* P4 is a pointer to an sqlite3_context object */ +#define P4_TABLEREF (-16) /* Like P4_TABLE, but reference counted */ /* Error message codes for OP_Halt */ #define P5_ConstraintNotNull 1 @@ -15632,48 +16675,48 @@ typedef struct VdbeOpList VdbeOpList; #define OP_Vacuum 5 #define OP_VFilter 6 /* jump, synopsis: iplan=r[P3] zplan='P4' */ #define OP_VUpdate 7 /* synopsis: data=r[P3@P2] */ -#define OP_Goto 8 /* jump */ -#define OP_Gosub 9 /* jump */ -#define OP_InitCoroutine 10 /* jump */ -#define OP_Yield 11 /* jump */ -#define OP_MustBeInt 12 /* jump */ -#define OP_Jump 13 /* jump */ -#define OP_Once 14 /* jump */ -#define OP_If 15 /* jump */ -#define OP_IfNot 16 /* jump */ -#define OP_IsNullOrType 17 /* jump, synopsis: if typeof(r[P1]) IN (P3,5) goto P2 */ -#define OP_IfNullRow 18 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ +#define OP_Init 8 /* jump, synopsis: Start at P2 */ +#define OP_Goto 9 /* jump */ +#define OP_Gosub 10 /* jump */ +#define OP_InitCoroutine 11 /* jump */ +#define OP_Yield 12 /* jump */ +#define OP_MustBeInt 13 /* jump */ +#define OP_Jump 14 /* jump */ +#define 
OP_Once 15 /* jump */ +#define OP_If 16 /* jump */ +#define OP_IfNot 17 /* jump */ +#define OP_IsType 18 /* jump, synopsis: if typeof(P1.P3) in P5 goto P2 */ #define OP_Not 19 /* same as TK_NOT, synopsis: r[P2]= !r[P1] */ -#define OP_SeekLT 20 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekLE 21 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGE 22 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekGT 23 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IfNotOpen 24 /* jump, synopsis: if( !csr[P1] ) goto P2 */ -#define OP_IfNoHope 25 /* jump, synopsis: key=r[P3@P4] */ -#define OP_NoConflict 26 /* jump, synopsis: key=r[P3@P4] */ -#define OP_NotFound 27 /* jump, synopsis: key=r[P3@P4] */ -#define OP_Found 28 /* jump, synopsis: key=r[P3@P4] */ -#define OP_SeekRowid 29 /* jump, synopsis: intkey=r[P3] */ -#define OP_NotExists 30 /* jump, synopsis: intkey=r[P3] */ -#define OP_Last 31 /* jump */ -#define OP_IfSmaller 32 /* jump */ -#define OP_SorterSort 33 /* jump */ -#define OP_Sort 34 /* jump */ -#define OP_Rewind 35 /* jump */ -#define OP_SorterNext 36 /* jump */ -#define OP_Prev 37 /* jump */ -#define OP_Next 38 /* jump */ -#define OP_IdxLE 39 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxGT 40 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxLT 41 /* jump, synopsis: key=r[P3@P4] */ -#define OP_IdxGE 42 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IfNullRow 20 /* jump, synopsis: if P1.nullRow then r[P3]=NULL, goto P2 */ +#define OP_SeekLT 21 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekLE 22 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekGE 23 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekGT 24 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IfNotOpen 25 /* jump, synopsis: if( !csr[P1] ) goto P2 */ +#define OP_IfNoHope 26 /* jump, synopsis: key=r[P3@P4] */ +#define OP_NoConflict 27 /* jump, synopsis: key=r[P3@P4] */ +#define OP_NotFound 28 /* jump, synopsis: key=r[P3@P4] */ +#define OP_Found 29 /* jump, synopsis: key=r[P3@P4] */ +#define OP_SeekRowid 30 /* jump, synopsis: intkey=r[P3] */ +#define OP_NotExists 31 /* jump, synopsis: intkey=r[P3] */ +#define OP_Last 32 /* jump */ +#define OP_IfSmaller 33 /* jump */ +#define OP_SorterSort 34 /* jump */ +#define OP_Sort 35 /* jump */ +#define OP_Rewind 36 /* jump */ +#define OP_SorterNext 37 /* jump */ +#define OP_Prev 38 /* jump */ +#define OP_Next 39 /* jump */ +#define OP_IdxLE 40 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxGT 41 /* jump, synopsis: key=r[P3@P4] */ +#define OP_IdxLT 42 /* jump, synopsis: key=r[P3@P4] */ #define OP_Or 43 /* same as TK_OR, synopsis: r[P3]=(r[P1] || r[P2]) */ #define OP_And 44 /* same as TK_AND, synopsis: r[P3]=(r[P1] && r[P2]) */ -#define OP_RowSetRead 45 /* jump, synopsis: r[P3]=rowset(P1) */ -#define OP_RowSetTest 46 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ -#define OP_Program 47 /* jump */ -#define OP_FkIfZero 48 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ -#define OP_IfPos 49 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */ +#define OP_IdxGE 45 /* jump, synopsis: key=r[P3@P4] */ +#define OP_RowSetRead 46 /* jump, synopsis: r[P3]=rowset(P1) */ +#define OP_RowSetTest 47 /* jump, synopsis: if r[P3] in rowset(P1) goto P2 */ +#define OP_Program 48 /* jump */ +#define OP_FkIfZero 49 /* jump, synopsis: if fkctr[P1]==0 goto P2 */ #define OP_IsNull 50 /* jump, same as TK_ISNULL, synopsis: if r[P1]==NULL goto P2 */ #define OP_NotNull 51 /* jump, same as TK_NOTNULL, synopsis: if r[P1]!=NULL goto P2 */ #define OP_Ne 52 /* jump, same as TK_NE, synopsis: IF r[P3]!=r[P1] 
*/
@@ -15683,12 +16726,12 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Lt 56 /* jump, same as TK_LT, synopsis: IF r[P3]<r[P1] */
#define OP_Ge 57 /* jump, same as TK_GE, synopsis: IF r[P3]>=r[P1] */
#define OP_ElseEq 58 /* jump, same as TK_ESCAPE */
-#define OP_IfNotZero 59 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
-#define OP_DecrJumpZero 60 /* jump, synopsis: if (--r[P1])==0 goto P2 */
-#define OP_IncrVacuum 61 /* jump */
-#define OP_VNext 62 /* jump */
-#define OP_Filter 63 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */
-#define OP_Init 64 /* jump, synopsis: Start at P2 */
+#define OP_IfPos 59 /* jump, synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 60 /* jump, synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
+#define OP_DecrJumpZero 61 /* jump, synopsis: if (--r[P1])==0 goto P2 */
+#define OP_IncrVacuum 62 /* jump */
+#define OP_VNext 63 /* jump */
+#define OP_Filter 64 /* jump, synopsis: if key(P3@P4) not in filter(P1) goto P2 */
#define OP_PureFunc 65 /* synopsis: r[P3]=func(r[P2@NP]) */
#define OP_Function 66 /* synopsis: r[P3]=func(r[P2@NP]) */
#define OP_Return 67
@@ -15798,19 +16841,22 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_VCreate 171
#define OP_VDestroy 172
#define OP_VOpen 173
-#define OP_VInitIn 174 /* synopsis: r[P2]=ValueList(P1,P3) */
-#define OP_VColumn 175 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 176
-#define OP_Pagecount 177
-#define OP_MaxPgcnt 178
-#define OP_ClrSubtype 179 /* synopsis: r[P1].subtype = 0 */
-#define OP_FilterAdd 180 /* synopsis: filter(P1) += key(P3@P4) */
-#define OP_Trace 181
-#define OP_CursorHint 182
-#define OP_ReleaseReg 183 /* synopsis: release r[P1@P2] mask P3 */
-#define OP_Noop 184
-#define OP_Explain 185
-#define OP_Abortable 186
+#define OP_VCheck 174
+#define OP_VInitIn 175 /* synopsis: r[P2]=ValueList(P1,P3) */
+#define OP_VColumn 176 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 177
+#define OP_Pagecount 178
+#define OP_MaxPgcnt 179
+#define OP_ClrSubtype 180 /* synopsis: r[P1].subtype = 0 */
+#define OP_GetSubtype 181 /* synopsis: r[P2] = r[P1].subtype */
+#define OP_SetSubtype 182 /* synopsis: r[P2].subtype = r[P1] */
+#define OP_FilterAdd 183 /* synopsis: filter(P1) += key(P3@P4) */
+#define OP_Trace 184
+#define OP_CursorHint 185
+#define OP_ReleaseReg 186 /* synopsis: release r[P1@P2] mask P3 */
+#define OP_Noop 187
+#define OP_Explain 188
+#define OP_Abortable 189
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -15822,31 +16868,32 @@ typedef struct VdbeOpList VdbeOpList;
#define OPFLG_IN3 0x08 /* in3: P3 is an input */
#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
+#define OPFLG_NCYCLE 0x40 /* ncycle:Cycles count against P1 */
#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,\
-/* 8 */ 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01, 0x03,\
-/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x09, 0x09, 0x09, 0x09,\
-/* 24 */ 0x01, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x01,\
-/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
-/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x23, 0x0b, 0x01,\
-/* 48 */ 0x01, 0x03, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x01, 0x01, 0x01,\
+/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\
+/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\
+/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\
+/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\
+/* 32 */ 0x41, 0x01, 0x41,
0x41, 0x41, 0x01, 0x41, 0x41,\ +/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\ +/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\ +/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\ /* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\ /* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\ /* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\ -/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00,\ -/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x26, 0x26,\ +/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\ +/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26, 0x26,\ /* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\ -/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\ -/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\ -/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\ -/* 136 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\ +/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\ +/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\ +/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x50,\ +/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\ /* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\ /* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\ /* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\ -/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\ -/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\ -/* 184 */ 0x00, 0x00, 0x00,} +/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x50,\ +/* 176 */ 0x40, 0x00, 0x10, 0x10, 0x02, 0x12, 0x12, 0x00,\ +/* 184 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,} /* The resolve3P2Values() routine is able to run faster if it knows ** the value of the largest JUMP opcode. The smaller the maximum @@ -15899,14 +16946,20 @@ SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(Vdbe*,int,int,int); #endif SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno); #ifndef SQLITE_OMIT_EXPLAIN -SQLITE_PRIVATE void sqlite3VdbeExplain(Parse*,u8,const char*,...); +SQLITE_PRIVATE int sqlite3VdbeExplain(Parse*,u8,const char*,...); SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse*); SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse*); # define ExplainQueryPlan(P) sqlite3VdbeExplain P +# ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define ExplainQueryPlan2(V,P) (V = sqlite3VdbeExplain P) +# else +# define ExplainQueryPlan2(V,P) ExplainQueryPlan(P) +# endif # define ExplainQueryPlanPop(P) sqlite3VdbeExplainPop(P) # define ExplainQueryPlanParent(P) sqlite3VdbeExplainParent(P) #else # define ExplainQueryPlan(P) +# define ExplainQueryPlan2(V,P) # define ExplainQueryPlanPop(P) # define ExplainQueryPlanParent(P) 0 # define sqlite3ExplainBreakpoint(A,B) /*no-op*/ @@ -15922,6 +16975,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1); SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2); SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3); SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5); +SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe*, int); SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr); SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe*, int addr); SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr); @@ -15936,6 +16990,7 @@ SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type); SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*); SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int); SQLITE_PRIVATE VdbeOp 
*sqlite3VdbeGetOp(Vdbe*, int); +SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe*); SQLITE_PRIVATE int sqlite3VdbeMakeLabel(Parse*); SQLITE_PRIVATE void sqlite3VdbeRunOnlyOnce(Vdbe*); SQLITE_PRIVATE void sqlite3VdbeReusable(Vdbe*); @@ -16013,7 +17068,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); ** The VdbeCoverage macros are used to set a coverage testing point ** for VDBE branch instructions. The coverage testing points are line ** numbers in the sqlite3.c source file. VDBE branch coverage testing -** only works with an amalagmation build. That's ok since a VDBE branch +** only works with an amalgamation build. That's ok since a VDBE branch ** coverage build designed for testing the test suite only. No application ** should ever ship with VDBE branch coverage measuring turned on. ** @@ -16031,7 +17086,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); ** // NULL option is not possible ** ** VdbeCoverageEqNe(v) // Previous OP_Jump is only interested -** // in distingishing equal and not-equal. +** // in distinguishing equal and not-equal. ** ** Every VDBE branch operation must be tagged with one of the macros above. ** If not, then when "make test" is run with -DSQLITE_VDBE_COVERAGE and @@ -16041,7 +17096,7 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe*, const char*, ...); ** During testing, the test application will invoke ** sqlite3_test_control(SQLITE_TESTCTRL_VDBE_COVERAGE,...) to set a callback ** routine that is invoked as each bytecode branch is taken. The callback -** contains the sqlite3.c source line number ov the VdbeCoverage macro and +** contains the sqlite3.c source line number of the VdbeCoverage macro and ** flags to indicate whether or not the branch was taken. The test application ** is responsible for keeping track of this and reporting byte-code branches ** that are never taken. @@ -16077,14 +17132,22 @@ SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int); #ifdef SQLITE_ENABLE_STMT_SCANSTATUS SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*); +SQLITE_PRIVATE void sqlite3VdbeScanStatusRange(Vdbe*, int, int, int); +SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(Vdbe*, int, int, int); #else -# define sqlite3VdbeScanStatus(a,b,c,d,e) +# define sqlite3VdbeScanStatus(a,b,c,d,e,f) +# define sqlite3VdbeScanStatusRange(a,b,c,d) +# define sqlite3VdbeScanStatusCounters(a,b,c,d) #endif #if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) SQLITE_PRIVATE void sqlite3VdbePrintOp(FILE*, int, VdbeOp*); #endif +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr); +#endif + #endif /* SQLITE_VDBE_H */ /************** End of vdbe.h ************************************************/ @@ -16133,7 +17196,7 @@ struct PgHdr { ** private to pcache.c and should not be accessed by other modules. ** pCache is grouped with the public elements for efficiency. 
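** (Illustrative note, not part of the original comment: the hunk below
** widens nRef from i16 to i64, and sqlite3PcacheRefCount() and
** sqlite3PcachePageRefcount() further down now return i64 to match, so a
** caller should hold the result in the wider type, e.g.
**
**    i64 nHeld = sqlite3PcachePageRefcount(pPg);   // was int
**
** rather than truncating it into a plain int.)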
*/ - i16 nRef; /* Number of users of this page */ + i64 nRef; /* Number of users of this page */ PgHdr *pDirtyNext; /* Next element in list of dirty pages */ PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */ /* NB: pDirtyNext and pDirtyPrev are undefined if the @@ -16214,12 +17277,12 @@ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *); SQLITE_PRIVATE void sqlite3PcacheClear(PCache*); /* Return the total number of outstanding page references */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*); +SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache*); /* Increment the reference count of an existing page */ SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*); -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*); +SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr*); /* Return the total number of pages stored in the cache */ SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*); @@ -16284,297 +17347,6 @@ SQLITE_PRIVATE int sqlite3PCacheIsDirty(PCache *pCache); /************** End of pcache.h **********************************************/ /************** Continuing where we left off in sqliteInt.h ******************/ -/************** Include os.h in the middle of sqliteInt.h ********************/ -/************** Begin file os.h **********************************************/ -/* -** 2001 September 16 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file (together with is companion C source-code file -** "os.c") attempt to abstract the underlying operating system so that -** the SQLite library will work on both POSIX and windows systems. -** -** This header file is #include-ed by sqliteInt.h and thus ends up -** being included by every source file. -*/ -#ifndef _SQLITE_OS_H_ -#define _SQLITE_OS_H_ - -/* -** Attempt to automatically detect the operating system and setup the -** necessary pre-processor macros for it. -*/ -/************** Include os_setup.h in the middle of os.h *********************/ -/************** Begin file os_setup.h ****************************************/ -/* -** 2013 November 25 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This file contains pre-processor directives related to operating system -** detection and/or setup. -*/ -#ifndef SQLITE_OS_SETUP_H -#define SQLITE_OS_SETUP_H - -/* -** Figure out if we are dealing with Unix, Windows, or some other operating -** system. -** -** After the following block of preprocess macros, all of SQLITE_OS_UNIX, -** SQLITE_OS_WIN, and SQLITE_OS_OTHER will defined to either 1 or 0. One of -** the three will be 1. The other two will be 0. 
-*/ -#if defined(SQLITE_OS_OTHER) -# if SQLITE_OS_OTHER==1 -# undef SQLITE_OS_UNIX -# define SQLITE_OS_UNIX 0 -# undef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# else -# undef SQLITE_OS_OTHER -# endif -#endif -#if !defined(SQLITE_OS_UNIX) && !defined(SQLITE_OS_OTHER) -# define SQLITE_OS_OTHER 0 -# ifndef SQLITE_OS_WIN -# if defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || \ - defined(__MINGW32__) || defined(__BORLANDC__) -# define SQLITE_OS_WIN 1 -# define SQLITE_OS_UNIX 0 -# else -# define SQLITE_OS_WIN 0 -# define SQLITE_OS_UNIX 1 -# endif -# else -# define SQLITE_OS_UNIX 0 -# endif -#else -# ifndef SQLITE_OS_WIN -# define SQLITE_OS_WIN 0 -# endif -#endif - -#endif /* SQLITE_OS_SETUP_H */ - -/************** End of os_setup.h ********************************************/ -/************** Continuing where we left off in os.h *************************/ - -/* If the SET_FULLSYNC macro is not defined above, then make it -** a no-op -*/ -#ifndef SET_FULLSYNC -# define SET_FULLSYNC(x,y) -#endif - -/* Maximum pathname length. Note: FILENAME_MAX defined by stdio.h -*/ -#ifndef SQLITE_MAX_PATHLEN -# define SQLITE_MAX_PATHLEN FILENAME_MAX -#endif - -/* Maximum number of symlinks that will be resolved while trying to -** expand a filename in xFullPathname() in the VFS. -*/ -#ifndef SQLITE_MAX_SYMLINK -# define SQLITE_MAX_SYMLINK 200 -#endif - -/* -** The default size of a disk sector -*/ -#ifndef SQLITE_DEFAULT_SECTOR_SIZE -# define SQLITE_DEFAULT_SECTOR_SIZE 4096 -#endif - -/* -** Temporary files are named starting with this prefix followed by 16 random -** alphanumeric characters, and no file extension. They are stored in the -** OS's standard temporary file directory, and are deleted prior to exit. -** If sqlite is being embedded in another program, you may wish to change the -** prefix to reflect your program's name, so that if your program exits -** prematurely, old temporary files can be easily identified. This can be done -** using -DSQLITE_TEMP_FILE_PREFIX=myprefix_ on the compiler command line. -** -** 2006-10-31: The default prefix used to be "sqlite_". But then -** Mcafee started using SQLite in their anti-virus product and it -** started putting files with the "sqlite" name in the c:/temp folder. -** This annoyed many windows users. Those users would then do a -** Google search for "sqlite", find the telephone numbers of the -** developers and call to wake them up at night and complain. -** For this reason, the default name prefix is changed to be "sqlite" -** spelled backwards. So the temp files are still identified, but -** anybody smart enough to figure out the code is also likely smart -** enough to know that calling the developer will not help get rid -** of the file. -*/ -#ifndef SQLITE_TEMP_FILE_PREFIX -# define SQLITE_TEMP_FILE_PREFIX "etilqs_" -#endif - -/* -** The following values may be passed as the second argument to -** sqlite3OsLock(). The various locks exhibit the following semantics: -** -** SHARED: Any number of processes may hold a SHARED lock simultaneously. -** RESERVED: A single process may hold a RESERVED lock on a file at -** any time. Other processes may hold and obtain new SHARED locks. -** PENDING: A single process may hold a PENDING lock on a file at -** any one time. Existing SHARED locks may persist, but no new -** SHARED locks may be obtained by other processes. -** EXCLUSIVE: An EXCLUSIVE lock precludes all other locks. -** -** PENDING_LOCK may not be passed directly to sqlite3OsLock(). 
Instead, a -** process that requests an EXCLUSIVE lock may actually obtain a PENDING -** lock. This can be upgraded to an EXCLUSIVE lock by a subsequent call to -** sqlite3OsLock(). -*/ -#define NO_LOCK 0 -#define SHARED_LOCK 1 -#define RESERVED_LOCK 2 -#define PENDING_LOCK 3 -#define EXCLUSIVE_LOCK 4 - -/* -** File Locking Notes: (Mostly about windows but also some info for Unix) -** -** We cannot use LockFileEx() or UnlockFileEx() on Win95/98/ME because -** those functions are not available. So we use only LockFile() and -** UnlockFile(). -** -** LockFile() prevents not just writing but also reading by other processes. -** A SHARED_LOCK is obtained by locking a single randomly-chosen -** byte out of a specific range of bytes. The lock byte is obtained at -** random so two separate readers can probably access the file at the -** same time, unless they are unlucky and choose the same lock byte. -** An EXCLUSIVE_LOCK is obtained by locking all bytes in the range. -** There can only be one writer. A RESERVED_LOCK is obtained by locking -** a single byte of the file that is designated as the reserved lock byte. -** A PENDING_LOCK is obtained by locking a designated byte different from -** the RESERVED_LOCK byte. -** -** On WinNT/2K/XP systems, LockFileEx() and UnlockFileEx() are available, -** which means we can use reader/writer locks. When reader/writer locks -** are used, the lock is placed on the same range of bytes that is used -** for probabilistic locking in Win95/98/ME. Hence, the locking scheme -** will support two or more Win95 readers or two or more WinNT readers. -** But a single Win95 reader will lock out all WinNT readers and a single -** WinNT reader will lock out all other Win95 readers. -** -** The following #defines specify the range of bytes used for locking. -** SHARED_SIZE is the number of bytes available in the pool from which -** a random byte is selected for a shared lock. The pool of bytes for -** shared locks begins at SHARED_FIRST. -** -** The same locking strategy and -** byte ranges are used for Unix. This leaves open the possibility of having -** clients on win95, winNT, and unix all talking to the same shared file -** and all locking correctly. To do so would require that samba (or whatever -** tool is being used for file sharing) implements locks correctly between -** windows and unix. I'm guessing that isn't likely to happen, but by -** using the same locking range we are at least open to the possibility. -** -** Locking in windows is manditory. For this reason, we cannot store -** actual data in the bytes used for locking. The pager never allocates -** the pages involved in locking therefore. SHARED_SIZE is selected so -** that all locks will fit on a single page even at the minimum page size. -** PENDING_BYTE defines the beginning of the locks. By default PENDING_BYTE -** is set high so that we don't have to allocate an unused page except -** for very large databases. But one should test the page skipping logic -** by setting PENDING_BYTE low and running the entire regression suite. -** -** Changing the value of PENDING_BYTE results in a subtly incompatible -** file format. Depending on how it is changed, you might not notice -** the incompatibility right away, even running a full regression test. -** The default location of PENDING_BYTE is the first byte past the -** 1GB boundary. 
-** -*/ -#ifdef SQLITE_OMIT_WSD -# define PENDING_BYTE (0x40000000) -#else -# define PENDING_BYTE sqlite3PendingByte -#endif -#define RESERVED_BYTE (PENDING_BYTE+1) -#define SHARED_FIRST (PENDING_BYTE+2) -#define SHARED_SIZE 510 - -/* -** Wrapper around OS specific sqlite3_os_init() function. -*/ -SQLITE_PRIVATE int sqlite3OsInit(void); - -/* -** Functions for accessing sqlite3_file methods -*/ -SQLITE_PRIVATE void sqlite3OsClose(sqlite3_file*); -SQLITE_PRIVATE int sqlite3OsRead(sqlite3_file*, void*, int amt, i64 offset); -SQLITE_PRIVATE int sqlite3OsWrite(sqlite3_file*, const void*, int amt, i64 offset); -SQLITE_PRIVATE int sqlite3OsTruncate(sqlite3_file*, i64 size); -SQLITE_PRIVATE int sqlite3OsSync(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file*, i64 *pSize); -SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file*, int); -SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut); -SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file*,int,void*); -SQLITE_PRIVATE void sqlite3OsFileControlHint(sqlite3_file*,int,void*); -#define SQLITE_FCNTL_DB_UNCHANGED 0xca093fa0 -SQLITE_PRIVATE int sqlite3OsSectorSize(sqlite3_file *id); -SQLITE_PRIVATE int sqlite3OsDeviceCharacteristics(sqlite3_file *id); -#ifndef SQLITE_OMIT_WAL -SQLITE_PRIVATE int sqlite3OsShmMap(sqlite3_file *,int,int,int,void volatile **); -SQLITE_PRIVATE int sqlite3OsShmLock(sqlite3_file *id, int, int, int); -SQLITE_PRIVATE void sqlite3OsShmBarrier(sqlite3_file *id); -SQLITE_PRIVATE int sqlite3OsShmUnmap(sqlite3_file *id, int); -#endif /* SQLITE_OMIT_WAL */ -SQLITE_PRIVATE int sqlite3OsFetch(sqlite3_file *id, i64, int, void **); -SQLITE_PRIVATE int sqlite3OsUnfetch(sqlite3_file *, i64, void *); - - -/* -** Functions for accessing sqlite3_vfs methods -*/ -SQLITE_PRIVATE int sqlite3OsOpen(sqlite3_vfs *, const char *, sqlite3_file*, int, int *); -SQLITE_PRIVATE int sqlite3OsDelete(sqlite3_vfs *, const char *, int); -SQLITE_PRIVATE int sqlite3OsAccess(sqlite3_vfs *, const char *, int, int *pResOut); -SQLITE_PRIVATE int sqlite3OsFullPathname(sqlite3_vfs *, const char *, int, char *); -#ifndef SQLITE_OMIT_LOAD_EXTENSION -SQLITE_PRIVATE void *sqlite3OsDlOpen(sqlite3_vfs *, const char *); -SQLITE_PRIVATE void sqlite3OsDlError(sqlite3_vfs *, int, char *); -SQLITE_PRIVATE void (*sqlite3OsDlSym(sqlite3_vfs *, void *, const char *))(void); -SQLITE_PRIVATE void sqlite3OsDlClose(sqlite3_vfs *, void *); -#endif /* SQLITE_OMIT_LOAD_EXTENSION */ -SQLITE_PRIVATE int sqlite3OsRandomness(sqlite3_vfs *, int, char *); -SQLITE_PRIVATE int sqlite3OsSleep(sqlite3_vfs *, int); -SQLITE_PRIVATE int sqlite3OsGetLastError(sqlite3_vfs*); -SQLITE_PRIVATE int sqlite3OsCurrentTimeInt64(sqlite3_vfs *, sqlite3_int64*); - -/* -** Convenience functions for opening and closing files using -** sqlite3_malloc() to obtain space for the file-handle structure. -*/ -SQLITE_PRIVATE int sqlite3OsOpenMalloc(sqlite3_vfs *, const char *, sqlite3_file **, int,int*); -SQLITE_PRIVATE void sqlite3OsCloseFree(sqlite3_file *); - -#endif /* _SQLITE_OS_H_ */ - -/************** End of os.h **************************************************/ -/************** Continuing where we left off in sqliteInt.h ******************/ /************** Include mutex.h in the middle of sqliteInt.h *****************/ /************** Begin file mutex.h *******************************************/ /* @@ -16663,7 +17435,7 @@ SQLITE_API int sqlite3_mutex_held(sqlite3_mutex*); /* ** Default synchronous levels. 
** -** Note that (for historcal reasons) the PAGER_SYNCHRONOUS_* macros differ +** Note that (for historical reasons) the PAGER_SYNCHRONOUS_* macros differ ** from the SQLITE_DEFAULT_SYNCHRONOUS value by 1. ** ** PAGER_SYNCHRONOUS DEFAULT_SYNCHRONOUS @@ -16702,7 +17474,7 @@ struct Db { ** An instance of the following structure stores a database schema. ** ** Most Schema objects are associated with a Btree. The exception is -** the Schema for the TEMP databaes (sqlite3.aDb[1]) which is free-standing. +** the Schema for the TEMP database (sqlite3.aDb[1]) which is free-standing. ** In shared cache mode, a single Schema object can be shared by multiple ** Btrees that refer to the same underlying BtShared object. ** @@ -16813,13 +17585,14 @@ struct Lookaside { LookasideSlot *pInit; /* List of buffers not previously used */ LookasideSlot *pFree; /* List of available buffers */ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE - LookasideSlot *pSmallInit; /* List of small buffers not prediously used */ + LookasideSlot *pSmallInit; /* List of small buffers not previously used */ LookasideSlot *pSmallFree; /* List of available small buffers */ void *pMiddle; /* First byte past end of full-size buffers and ** the first byte of LOOKASIDE_SMALL buffers */ #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ void *pStart; /* First byte of available memory space */ void *pEnd; /* First byte past end of available space */ + void *pTrueEnd; /* True value of pEnd, when db->pnBytesFreed!=0 */ }; struct LookasideSlot { LookasideSlot *pNext; /* Next buffer in the list of free buffers */ @@ -16829,7 +17602,7 @@ struct LookasideSlot { #define EnableLookaside db->lookaside.bDisable--;\ db->lookaside.sz=db->lookaside.bDisable?0:db->lookaside.szTrue -/* Size of the smaller allocations in two-size lookside */ +/* Size of the smaller allocations in two-size lookaside */ #ifdef SQLITE_OMIT_TWOSIZE_LOOKASIDE # define LOOKASIDE_SMALL 0 #else @@ -17029,6 +17802,7 @@ struct sqlite3 { i64 nDeferredCons; /* Net deferred constraints this transaction. */ i64 nDeferredImmCons; /* Net deferred immediate constraints */ int *pnBytesFreed; /* If not NULL, increment this in DbFree() */ + DbClientData *pDbData; /* sqlite3_set_clientdata() content */ #ifdef SQLITE_ENABLE_UNLOCK_NOTIFY /* The following variables are all protected by the STATIC_MAIN ** mutex, not by sqlite3.mutex. They are used by code in notify.c. @@ -17084,7 +17858,7 @@ struct sqlite3 { #define SQLITE_NullCallback 0x00000100 /* Invoke the callback once if the */ /* result set is empty */ #define SQLITE_IgnoreChecks 0x00000200 /* Do not enforce check constraints */ -#define SQLITE_ReadUncommit 0x00000400 /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_StmtScanStatus 0x00000400 /* Enable stmt_scanstats() counters */ #define SQLITE_NoCkptOnClose 0x00000800 /* No checkpoint on close()/DETACH */ #define SQLITE_ReverseOrder 0x00001000 /* Reverse unordered SELECTs */ #define SQLITE_RecTriggers 0x00002000 /* Enable recursive triggers */ @@ -17110,6 +17884,8 @@ struct sqlite3 { /* DELETE, or UPDATE and return */ /* the count using a callback. 
*/ #define SQLITE_CorruptRdOnly HI(0x00002) /* Prohibit writes due to error */ +#define SQLITE_ReadUncommit HI(0x00004) /* READ UNCOMMITTED in shared-cache */ +#define SQLITE_FkNoAction HI(0x00008) /* Treat all FK as NO ACTION */ /* Flags used only if debugging */ #ifdef SQLITE_DEBUG @@ -17164,6 +17940,10 @@ struct sqlite3 { #define SQLITE_ReleaseReg 0x00400000 /* Use OP_ReleaseReg for testing */ #define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */ /* TH3 expects this value ^^^^^^^^^^ See flatten04.test */ +#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */ +#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */ +#define SQLITE_NullUnusedCols 0x04000000 /* NULL unused columns in subqueries */ +#define SQLITE_OnePass 0x08000000 /* Single-pass DELETE and UPDATE */ #define SQLITE_AllOpts 0xffffffff /* All optimizations */ /* @@ -17246,10 +18026,17 @@ struct FuncDestructor { ** SQLITE_FUNC_ANYORDER == NC_OrderAgg == SF_OrderByReqd ** SQLITE_FUNC_LENGTH == OPFLAG_LENGTHARG ** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG +** SQLITE_FUNC_BYTELEN == OPFLAG_BYTELENARG ** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API ** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API -** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS +** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!! ** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API +** +** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the +** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is +** used internally and if set means that the function has side effects. +** SQLITE_INNOCUOUS is used by application code and means "not unsafe". +** See multiple instances of tag-20230109-1. */ #define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */ #define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */ @@ -17258,6 +18045,7 @@ struct FuncDestructor { #define SQLITE_FUNC_NEEDCOLL 0x0020 /* sqlite3GetFuncCollSeq() might be called*/ #define SQLITE_FUNC_LENGTH 0x0040 /* Built-in length() function */ #define SQLITE_FUNC_TYPEOF 0x0080 /* Built-in typeof() function */ +#define SQLITE_FUNC_BYTELEN 0x00c0 /* Built-in octet_length() function */ #define SQLITE_FUNC_COUNT 0x0100 /* Built-in count(*) aggregate */ /* 0x0200 -- available for reuse */ #define SQLITE_FUNC_UNLIKELY 0x0400 /* Built-in unlikely() function */ @@ -17266,14 +18054,15 @@ struct FuncDestructor { #define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". 
Value constant during a ** single query - might change over time */ #define SQLITE_FUNC_TEST 0x4000 /* Built-in testing functions */ -/* 0x8000 -- available for reuse */ +#define SQLITE_FUNC_RUNONLY 0x8000 /* Cannot be used by valueFromFunction */ #define SQLITE_FUNC_WINDOW 0x00010000 /* Built-in window-only function */ #define SQLITE_FUNC_INTERNAL 0x00040000 /* For use by NestedParse() only */ #define SQLITE_FUNC_DIRECT 0x00080000 /* Not for use in TRIGGERs or VIEWs */ -#define SQLITE_FUNC_SUBTYPE 0x00100000 /* Result likely to have sub-type */ +/* SQLITE_SUBTYPE 0x00100000 // Consumer of subtypes */ #define SQLITE_FUNC_UNSAFE 0x00200000 /* Function has side effects */ #define SQLITE_FUNC_INLINE 0x00400000 /* Functions implemented in-line */ #define SQLITE_FUNC_BUILTIN 0x00800000 /* This is a built-in function */ +/* SQLITE_RESULT_SUBTYPE 0x01000000 // Generator of subtypes */ #define SQLITE_FUNC_ANYORDER 0x08000000 /* count/min/max aggregate */ /* Identifier numbers for each in-line function */ @@ -17365,10 +18154,11 @@ struct FuncDestructor { #define MFUNCTION(zName, nArg, xPtr, xFunc) \ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ xPtr, 0, xFunc, 0, 0, 0, #zName, {0} } -#define JFUNCTION(zName, nArg, iArg, xFunc) \ - {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS|\ - SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \ - SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} } +#define JFUNCTION(zName, nArg, bUseCache, bWS, bRS, bJsonB, iArg, xFunc) \ + {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_FUNC_CONSTANT|\ + SQLITE_UTF8|((bUseCache)*SQLITE_FUNC_RUNONLY)|\ + ((bRS)*SQLITE_SUBTYPE)|((bWS)*SQLITE_RESULT_SUBTYPE), \ + SQLITE_INT_TO_PTR(iArg|((bJsonB)*JSON_BLOB)),0,xFunc,0, 0, 0, #zName, {0} } #define INLINE_FUNC(zName, nArg, iArg, mFlags) \ {nArg, SQLITE_FUNC_BUILTIN|\ SQLITE_UTF8|SQLITE_FUNC_INLINE|SQLITE_FUNC_CONSTANT|(mFlags), \ @@ -17558,6 +18348,7 @@ struct CollSeq { #define SQLITE_AFF_NUMERIC 0x43 /* 'C' */ #define SQLITE_AFF_INTEGER 0x44 /* 'D' */ #define SQLITE_AFF_REAL 0x45 /* 'E' */ +#define SQLITE_AFF_FLEXNUM 0x46 /* 'F' */ #define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC) @@ -17628,6 +18419,7 @@ struct VTable { sqlite3_vtab *pVtab; /* Pointer to vtab instance */ int nRef; /* Number of pointers to this structure */ u8 bConstraint; /* True if constraints are supported */ + u8 bAllSchemas; /* True if might use any attached schema */ u8 eVtabRisk; /* Riskiness of allowing hacker access */ int iSavepoint; /* Depth of the SAVEPOINT stack */ VTable *pNext; /* Next in linked list (see above) */ @@ -17736,7 +18528,7 @@ struct Table { #ifndef SQLITE_OMIT_VIRTUALTABLE # define IsVirtual(X) ((X)->eTabType==TABTYP_VTAB) # define ExprIsVtab(X) \ - ((X)->op==TK_COLUMN && (X)->y.pTab!=0 && (X)->y.pTab->eTabType==TABTYP_VTAB) + ((X)->op==TK_COLUMN && (X)->y.pTab->eTabType==TABTYP_VTAB) #else # define IsVirtual(X) 0 # define ExprIsVtab(X) 0 @@ -17764,6 +18556,15 @@ struct Table { #define HasRowid(X) (((X)->tabFlags & TF_WithoutRowid)==0) #define VisibleRowid(X) (((X)->tabFlags & TF_NoVisibleRowid)==0) +/* Macro is true if the SQLITE_ALLOW_ROWID_IN_VIEW (mis-)feature is +** available. By default, this macro is false +*/ +#ifndef SQLITE_ALLOW_ROWID_IN_VIEW +# define ViewCanHaveRowid 0 +#else +# define ViewCanHaveRowid (sqlite3Config.mNoVisibleRowid==0) +#endif + /* ** Each foreign key constraint is an instance of the following structure. ** @@ -17835,7 +18636,7 @@ struct FKey { ** foreign key. 
**
** The OE_Default value is a place holder that means to use whatever
-** conflict resolution algorthm is required from context.
+** conflict resolution algorithm is required from context.
**
** The following symbolic values are used to record which type
** of conflict resolution action to take.
@@ -17953,10 +18754,22 @@ struct UnpackedRecord {
** The Index.onError field determines whether or not the indexed columns
** must be unique and what to do if they are not. When Index.onError=OE_None,
** it means this is not a unique index. Otherwise it is a unique index
-** and the value of Index.onError indicate the which conflict resolution
-** algorithm to employ whenever an attempt is made to insert a non-unique
+** and the value of Index.onError indicates which conflict resolution
+** algorithm to employ when an attempt is made to insert a non-unique
** element.
**
+** The colNotIdxed bitmask is used in combination with SrcItem.colUsed
+** for a fast test to see if an index can serve as a covering index.
+** colNotIdxed has a 1 bit for every column of the original table that
+** is *not* available in the index. Thus the expression
+** "colUsed & colNotIdxed" will be non-zero if the index is not a
+** covering index. The most significant bit of colNotIdxed will always
+** be true (note-20221022-a). If a column beyond the 63rd column of the
+** table is used, the "colUsed & colNotIdxed" test will always be non-zero
+** and we have to assume either that the index is not covering, or use
+** an alternative (slower) algorithm to determine whether or not
+** the index is covering.
+**
** While parsing a CREATE TABLE or CREATE INDEX statement in order to
** generate VDBE code (as opposed to parsing one read from an sqlite_schema
** table as part of parsing an existing database schema), transient instances
@@ -17989,18 +18802,22 @@ struct Index {
 unsigned isCovering:1; /* True if this is a covering index */
 unsigned noSkipScan:1; /* Do not try to use skip-scan if true */
 unsigned hasStat1:1; /* aiRowLogEst values come from sqlite_stat1 */
+ unsigned bLowQual:1; /* sqlite_stat1 says this is a low-quality index */
 unsigned bNoQuery:1; /* Do not use this index to optimize queries */
 unsigned bAscKeyBug:1; /* True if the bba7b69f9849b5bf bug applies */
 unsigned bHasVCol:1; /* Index references one or more VIRTUAL columns */
+ unsigned bHasExpr:1; /* Index contains an expression, either a literal
+ ** expression, or a reference to a VIRTUAL column */
#ifdef SQLITE_ENABLE_STAT4
 int nSample; /* Number of elements in aSample[] */
+ int mxSample; /* Number of slots allocated to aSample[] */
 int nSampleCol; /* Size of IndexSample.anEq[] and so on */
 tRowcnt *aAvgEq; /* Average nEq values for keys not in aSample */
 IndexSample *aSample; /* Samples of the left-most key */
 tRowcnt *aiRowEst; /* Non-logarithmic stat1 data for this index */
 tRowcnt nRowEst0; /* Non-logarithmic number of rows in the index */
#endif
- Bitmask colNotIdxed; /* 0 for unindexed columns in pTab */
+ Bitmask colNotIdxed; /* Unindexed columns in pTab */
};
/*
@@ -18075,16 +18892,15 @@ struct AggInfo {
 ** from source tables rather than from accumulators */
 u8 useSortingIdx; /* In direct mode, reference the sorting index rather
 ** than the source table */
+ u16 nSortingColumn; /* Number of columns in the sorting index */
 int sortingIdx; /* Cursor number of the sorting index */
 int sortingIdxPTab; /* Cursor number of pseudo-table */
- int nSortingColumn; /* Number of columns in the sorting index */
- int mnReg, mxReg; /* Range of registers
allocated for aCol and aFunc */ + int iFirstReg; /* First register in range for aCol[] and aFunc[] */ ExprList *pGroupBy; /* The group by clause */ struct AggInfo_col { /* For each column used in source tables */ Table *pTab; /* Source table */ Expr *pCExpr; /* The original expression */ int iTable; /* Cursor number of the source table */ - int iMem; /* Memory location that acts as accumulator */ i16 iColumn; /* Column number within the source table */ i16 iSorterColumn; /* Column number in the sorting index */ } *aCol; @@ -18095,14 +18911,31 @@ struct AggInfo { struct AggInfo_func { /* For each aggregate function */ Expr *pFExpr; /* Expression encoding the function */ FuncDef *pFunc; /* The aggregate function implementation */ - int iMem; /* Memory location that acts as accumulator */ int iDistinct; /* Ephemeral table used to enforce DISTINCT */ int iDistAddr; /* Address of OP_OpenEphemeral */ + int iOBTab; /* Ephemeral table to implement ORDER BY */ + u8 bOBPayload; /* iOBTab has payload columns separate from key */ + u8 bOBUnique; /* Enforce uniqueness on iOBTab keys */ + u8 bUseSubtype; /* Transfer subtype info through sorter */ } *aFunc; int nFunc; /* Number of entries in aFunc[] */ u32 selId; /* Select to which this AggInfo belongs */ +#ifdef SQLITE_DEBUG + Select *pSelect; /* SELECT statement that this AggInfo supports */ +#endif }; +/* +** Macros to compute aCol[] and aFunc[] register numbers. +** +** These macros should not be used prior to the call to +** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg. +** The assert()s that are part of this macro verify that constraint. +*/ +#define AggInfoColumnReg(A,I) (assert((A)->iFirstReg),(A)->iFirstReg+(I)) +#define AggInfoFuncReg(A,I) \ + (assert((A)->iFirstReg),(A)->iFirstReg+(A)->nColumn+(I)) + /* ** The datatype ynVar is a signed integer, either 16-bit or 32-bit. ** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater @@ -18222,7 +19055,7 @@ struct Expr { ** TK_REGISTER: register number ** TK_TRIGGER: 1 -> new, 0 -> old ** EP_Unlikely: 134217728 times likelihood - ** TK_IN: ephemerial table holding RHS + ** TK_IN: ephemeral table holding RHS ** TK_SELECT_COLUMN: Number of columns on the LHS ** TK_SELECT: 1st register of result vector */ ynVar iColumn; /* TK_COLUMN: column index. -1 for rowid. @@ -18268,7 +19101,7 @@ struct Expr { #define EP_Reduced 0x004000 /* Expr struct EXPR_REDUCEDSIZE bytes only */ #define EP_Win 0x008000 /* Contains window functions */ #define EP_TokenOnly 0x010000 /* Expr struct EXPR_TOKENONLYSIZE bytes only */ -#define EP_MemToken 0x020000 /* Need to sqlite3DbFree() Expr.zToken */ +#define EP_FullSize 0x020000 /* Expr structure must remain full sized */ #define EP_IfNullRow 0x040000 /* The TK_IF_NULL_ROW opcode */ #define EP_Unlikely 0x080000 /* unlikely() or likelihood() function */ #define EP_ConstFunc 0x100000 /* A SQLITE_FUNC_CONSTANT or _SLOCHNG function */ @@ -18298,12 +19131,15 @@ struct Expr { #define ExprClearProperty(E,P) (E)->flags&=~(P) #define ExprAlwaysTrue(E) (((E)->flags&(EP_OuterON|EP_IsTrue))==EP_IsTrue) #define ExprAlwaysFalse(E) (((E)->flags&(EP_OuterON|EP_IsFalse))==EP_IsFalse) +#define ExprIsFullSize(E) (((E)->flags&(EP_Reduced|EP_TokenOnly))==0) /* Macros used to ensure that the correct members of unions are accessed ** in Expr. 
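** As a hypothetical illustration (visitSelect() and visitList() are
** invented helper names, not from this file), a tree walker is expected to
** branch on the macros below before reading the Expr.x union:
**
**    if( ExprUseXSelect(pExpr) ){
**      visitSelect(pExpr->x.pSelect);   // EP_xIsSelect set: x.pSelect valid
**    }else{
**      visitList(pExpr->x.pList);       // EP_xIsSelect clear: x.pList valid
**    }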
*/ #define ExprUseUToken(E) (((E)->flags&EP_IntValue)==0) #define ExprUseUValue(E) (((E)->flags&EP_IntValue)!=0) +#define ExprUseWOfst(E) (((E)->flags&(EP_InnerON|EP_OuterON))==0) +#define ExprUseWJoin(E) (((E)->flags&(EP_InnerON|EP_OuterON))!=0) #define ExprUseXList(E) (((E)->flags&EP_xIsSelect)==0) #define ExprUseXSelect(E) (((E)->flags&EP_xIsSelect)!=0) #define ExprUseYTab(E) (((E)->flags&(EP_WinFunc|EP_Subrtn))==0) @@ -18413,6 +19249,7 @@ struct ExprList { #define ENAME_NAME 0 /* The AS clause of a result set */ #define ENAME_SPAN 1 /* Complete text of the result set expression */ #define ENAME_TAB 2 /* "DB.TABLE.NAME" for the result set */ +#define ENAME_ROWID 3 /* "DB.TABLE._rowid_" for * expansion of rowid */ /* ** An instance of this structure can hold a simple list of identifiers, @@ -18453,6 +19290,14 @@ struct IdList { ** The SrcItem object represents a single term in the FROM clause of a query. ** The SrcList object is mostly an array of SrcItems. ** +** The jointype starts out showing the join type between the current table +** and the next table on the list. The parser builds the list this way. +** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each +** jointype expresses the join between the table and the previous table. +** +** In the colUsed field, the high-order bit (bit 63) is set if the table +** contains more than 63 columns and the 64-th or later column is used. +** ** Union member validity: ** ** u1.zIndexedBy fg.isIndexedBy && !fg.isTabFunc @@ -18484,7 +19329,7 @@ struct SrcItem { unsigned notCte :1; /* This item may not match a CTE */ unsigned isUsing :1; /* u3.pUsing is valid */ unsigned isOn :1; /* u3.pOn was once valid and non-NULL */ - unsigned isSynthUsing :1; /* u3.pUsing is synthensized from NATURAL */ + unsigned isSynthUsing :1; /* u3.pUsing is synthesized from NATURAL */ unsigned isNestedFrom :1; /* pSelect is a SF_NestedFrom subquery */ } fg; int iCursor; /* The VDBE cursor number used to access this table */ @@ -18492,14 +19337,14 @@ struct SrcItem { Expr *pOn; /* fg.isUsing==0 => The ON clause of a join */ IdList *pUsing; /* fg.isUsing==1 => The USING clause of a join */ } u3; - Bitmask colUsed; /* Bit N (1<<N) set if column N of pTab is used */ + Bitmask colUsed; /* Bit N set if column N used. Details above for N>62 */ union { char *zIndexedBy; /* Identifier from "INDEXED BY <zIndex>" clause */ ExprList *pFuncArg; /* Arguments to table-valued-function */ } u1; union { Index *pIBIndex; /* Index structure corresponding to u1.zIndexedBy */ - CteUse *pCteUse; /* CTE Usage info info fg.isCte is true */ + CteUse *pCteUse; /* CTE Usage info when fg.isCte is true */ } u2; }; @@ -18513,23 +19358,11 @@ struct OnOrUsing { }; /* -** The following structure describes the FROM clause of a SELECT statement. -** Each table or subquery in the FROM clause is a separate element of -** the SrcList.a[] array. -** -** With the addition of multiple database support, the following structure -** can also be used to describe a particular table such as the table that -** is modified by an INSERT, DELETE, or UPDATE statement. In standard SQL, -** such a table must be a simple name: ID. But in SQLite, the table can -** now be identified by a database name, a dot, then the table name: ID.ID. -** -** The jointype starts out showing the join type between the current table -** and the next table on the list. The parser builds the list this way. -** But sqlite3SrcListShiftJoinType() later shifts the jointypes so that each -** jointype expresses the join between the table and the previous table.
+** This object represents one or more tables that are the source of +** content for an SQL statement. For example, a single SrcList object +** is used to hold the FROM clause of a SELECT statement. SrcList also +** represents the target tables for DELETE, INSERT, and UPDATE statements. ** -** In the colUsed field, the high-order bit (bit 63) is set if the table -** contains more than 63 columns and the 64-th or later column is used. */ struct SrcList { int nSrc; /* Number of tables or subqueries in the FROM clause */ @@ -18617,6 +19450,7 @@ struct NameContext { int nRef; /* Number of names resolved by this context */ int nNcErr; /* Number of errors encountered while resolving names */ int ncFlags; /* Zero or more NC_* flags defined below */ + u32 nNestedSelect; /* Number of nested selects using this NC */ Select *pWinSelect; /* SELECT statement for any window functions */ }; @@ -18637,7 +19471,7 @@ struct NameContext { #define NC_HasAgg 0x000010 /* One or more aggregate functions seen */ #define NC_IdxExpr 0x000020 /* True if resolving columns of CREATE INDEX */ #define NC_SelfRef 0x00002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */ -#define NC_VarSelect 0x000040 /* A correlated subquery has been seen */ +#define NC_Subquery 0x000040 /* A subquery has been seen */ #define NC_UEList 0x000080 /* True if uNC.pEList is used */ #define NC_UAggInfo 0x000100 /* True if uNC.pAggInfo is used */ #define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */ @@ -18650,6 +19484,7 @@ struct NameContext { #define NC_InAggFunc 0x020000 /* True if analyzing arguments to an agg func */ #define NC_FromDDL 0x040000 /* SQL text comes from sqlite_schema */ #define NC_NoSelect 0x080000 /* Do not descend into sub-selects */ +#define NC_Where 0x100000 /* Processing WHERE clause of a SELECT */ #define NC_OrderAgg 0x8000000 /* Has an aggregate other than count/min/max */ /* @@ -18673,6 +19508,7 @@ struct Upsert { Expr *pUpsertWhere; /* WHERE clause for the ON CONFLICT UPDATE */ Upsert *pNextUpsert; /* Next ON CONFLICT clause in the list */ u8 isDoUpdate; /* True for DO UPDATE. False for DO NOTHING */ + u8 isDup; /* True if 2nd or later with same pUpsertIdx */ /* Above this point is the parse tree for the ON CONFLICT clauses. ** The next group of fields stores intermediate data. 
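**
** Illustrative example (a hypothetical statement, shown only to make
** the list structure concrete): an INSERT such as
**
**     INSERT INTO t(a,b) VALUES(1,2)
**       ON CONFLICT(a) DO UPDATE SET b=excluded.b
**       ON CONFLICT DO NOTHING;
**
** should produce two Upsert objects chained through pNextUpsert, with
** isDoUpdate set on the first and clear on the second.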
*/ void *pToFree; /* Free memory when deleting the Upsert object */ @@ -18766,6 +19602,7 @@ struct Select { #define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */ #define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */ #define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */ +#define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */ /* True if S exists and has SF_NestedFrom */ #define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0) @@ -18874,7 +19711,7 @@ struct SelectDest { int iSDParm2; /* A second parameter for the eDest disposal method */ int iSdst; /* Base register where results are written */ int nSdst; /* Number of registers allocated */ - char *zAffSdst; /* Affinity used when eDest==SRT_Set */ + char *zAffSdst; /* Affinity used for SRT_Set */ ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */ }; @@ -18933,11 +19770,34 @@ struct TriggerPrg { #else typedef unsigned int yDbMask; # define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0) -# define DbMaskZero(M) (M)=0 -# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I)) -# define DbMaskAllZero(M) (M)==0 -# define DbMaskNonZero(M) (M)!=0 +# define DbMaskZero(M) ((M)=0) +# define DbMaskSet(M,I) ((M)|=(((yDbMask)1)<<(I))) +# define DbMaskAllZero(M) ((M)==0) +# define DbMaskNonZero(M) ((M)!=0) +#endif + +/* +** For each index X that has as one of its arguments either an expression +** or the name of a virtual generated column, and if X is in scope such that +** the value of the expression can simply be read from the index, then +** there is an instance of this object on the Parse.pIdxExpr list. +** +** During code generation, while generating code to evaluate expressions, +** this list is consulted and if a matching expression is found, the value +** is read from the index rather than being recomputed. +*/ +struct IndexedExpr { + Expr *pExpr; /* The expression contained in the index */ + int iDataCur; /* The data cursor associated with the index */ + int iIdxCur; /* The index cursor */ + int iIdxCol; /* The index column that contains value of pExpr */ + u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */ + u8 aff; /* Affinity of the pExpr expression */ + IndexedExpr *pIENext; /* Next in a list of all indexed expressions */ +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + const char *zIdxName; /* Name of index, used only for bytecode comments */ #endif +}; /* ** An instance of the ParseCleanup object specifies an operation that @@ -18980,10 +19840,13 @@ struct Parse { u8 hasCompound; /* Need to invoke convertCompoundSelectToSubquery() */ u8 okConstFactor; /* OK to factor out constants */ u8 disableLookaside; /* Number of times lookaside has been disabled */ - u8 disableVtab; /* Disable all virtual tables for this parse */ + u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 withinRJSubrtn; /* Nesting level for RIGHT JOIN body subroutines */ #if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST) u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */ +#endif +#ifdef SQLITE_DEBUG + u8 ifNotExists; /* Might be true if IF NOT EXISTS. 
Assert()s only */ #endif int nRangeReg; /* Size of the temporary register block */ int iRangeReg; /* First register in temporary register block */ @@ -18997,6 +19860,8 @@ struct Parse { int nLabelAlloc; /* Number of slots in aLabel */ int *aLabel; /* Space to hold the labels */ ExprList *pConstExpr;/* Constant expressions */ + IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */ + IndexedExpr *pIdxPartExpr; /* Exprs constrained by index WHERE clauses */ Token constraintName;/* Name of the constraint currently being parsed */ yDbMask writeMask; /* Start a write transaction on these databases */ yDbMask cookieMask; /* Bitmask of schema verified databases */ @@ -19004,6 +19869,9 @@ struct Parse { int regRoot; /* Register holding root page number for new objects */ int nMaxArg; /* Max args passed to user function by sub-program */ int nSelect; /* Number of SELECT stmts. Counter for Select.selId */ +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */ +#endif #ifndef SQLITE_OMIT_SHARED_CACHE int nTableLock; /* Number of locks in aTableLock */ TableLock *aTableLock; /* Required table locks for shared-cache mode */ @@ -19017,9 +19885,9 @@ struct Parse { int addrCrTab; /* Address of OP_CreateBtree on CREATE TABLE */ Returning *pReturning; /* The RETURNING clause */ } u1; - u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u32 oldmask; /* Mask of old.* columns referenced */ u32 newmask; /* Mask of new.* columns referenced */ + LogEst nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */ u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */ u8 bReturning; /* Coding a RETURNING trigger */ u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */ @@ -19143,6 +20011,7 @@ struct AuthContext { #define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */ #define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */ #define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */ +#define OPFLAG_BYTELENARG 0xc0 /* OP_Column only for octet_length() */ #define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */ #define OPFLAG_SEEKEQ 0x02 /* OP_Open** cursor uses EQ seek only */ #define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */ @@ -19264,6 +20133,7 @@ struct Returning { int iRetCur; /* Transient table holding RETURNING results */ int nRetCol; /* Number of in pReturnEL after expansion */ int iRetReg; /* Register array for holding a row of RETURNING */ + char zName[40]; /* Name of trigger: "sqlite_returning_%p" */ }; /* @@ -19285,6 +20155,28 @@ struct sqlite3_str { #define isMalloced(X) (((X)->printfFlags & SQLITE_PRINTF_MALLOCED)!=0) +/* +** The following object is the header for an "RCStr" or "reference-counted +** string". An RCStr is passed around and used like any other char* +** that has been dynamically allocated. The important interface +** differences: +** +** 1. RCStr strings are reference counted. They are deallocated +** when the reference count reaches zero. +** +** 2. Use sqlite3RCStrUnref() to free an RCStr string rather than +** sqlite3_free() +** +** 3. Make a (read-only) copy of a read-only RCStr string using +** sqlite3RCStrRef(). +** +** "String" is in the name, but an RCStr object can also be used to hold +** binary data. 
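+**
+** A minimal usage sketch (illustrative only; it assumes the
+** sqlite3RCStrNew()/sqlite3RCStrRef()/sqlite3RCStrUnref() interfaces
+** declared later in this file):
+**
+**     char *z = sqlite3RCStrNew(6);    reference count is now 1
+**     memcpy(z, "hello", 6);
+**     sqlite3RCStrRef(z);              reference count is now 2
+**     sqlite3RCStrUnref(z);            reference count drops to 1
+**     sqlite3RCStrUnref(z);            count reaches 0; buffer is freed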
+*/ +struct RCStr { + u64 nRCRef; /* Number of references */ + /* Total structure size should be a multiple of 8 bytes for alignment */ +}; /* ** A pointer to this structure is used to communicate information @@ -19311,7 +20203,7 @@ typedef struct { /* Tuning parameters are set using SQLITE_TESTCTRL_TUNE and are controlled ** on debug-builds of the CLI using ".testctrl tune ID VALUE". Tuning ** parameters are for temporary use during development, to help find -** optimial values for parameters in the query planner. The should not +** optimal values for parameters in the query planner. The should not ** be used on trunk check-ins. They are a temporary mechanism available ** for transient development builds only. ** @@ -19337,6 +20229,10 @@ struct Sqlite3Config { u8 bUseCis; /* Use covering indices for full-scans */ u8 bSmallMalloc; /* Avoid large memory allocations if true */ u8 bExtraSchemaChecks; /* Verify type,name,tbl_name in schema */ + u8 bUseLongDouble; /* Make use of long double */ +#ifdef SQLITE_DEBUG + u8 bJsonSelfcheck; /* Double-check JSON parsing */ +#endif int mxStrlen; /* Maximum string length */ int neverCorrupt; /* Database is always well-formed */ int szLookaside; /* Default lookaside buffer size */ @@ -19383,6 +20279,11 @@ struct Sqlite3Config { #endif #ifndef SQLITE_UNTESTABLE int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */ +#endif +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + u32 mNoVisibleRowid; /* TF_NoVisibleRowid if the ROWID_IN_VIEW + ** feature is disabled. 0 if rowids can + ** occur in views. */ #endif int bLocaltimeFault; /* True to fail localtime() calls */ int (*xAltLocaltime)(const void*,void*); /* Alternative localtime() routine */ @@ -19423,6 +20324,7 @@ struct Walker { void (*xSelectCallback2)(Walker*,Select*);/* Second callback for SELECTs */ int walkerDepth; /* Number of subqueries */ u16 eCode; /* A small processing code */ + u16 mWFlags; /* Use-dependent flags */ union { /* Extra data for callback */ NameContext *pNC; /* Naming context */ int n; /* A counter */ @@ -19432,15 +20334,16 @@ struct Walker { struct RefSrcList *pRefSrcList; /* sqlite3ReferencesSrcList() */ int *aiCol; /* array of column indexes */ struct IdxCover *pIdxCover; /* Check for index coverage */ - struct IdxExprTrans *pIdxTrans; /* Convert idxed expr to column */ ExprList *pGroupBy; /* GROUP BY clause */ Select *pSelect; /* HAVING to WHERE clause ctx */ struct WindowRewrite *pRewrite; /* Window rewrite context */ struct WhereConst *pConst; /* WHERE clause constants */ struct RenameCtx *pRename; /* RENAME COLUMN context */ struct Table *pTab; /* Table of generated column */ + struct CoveringIndexCheck *pCovIdxCk; /* Check for covering index */ SrcItem *pSrcItem; /* A single FROM clause item */ - DbFixer *pFix; + DbFixer *pFix; /* See sqlite3FixSelect() */ + Mem *aMem; /* See sqlite3BtreeCursorHint() */ } u; }; @@ -19461,6 +20364,7 @@ struct DbFixer { /* Forward declarations */ SQLITE_PRIVATE int sqlite3WalkExpr(Walker*, Expr*); +SQLITE_PRIVATE int sqlite3WalkExprNN(Walker*, Expr*); SQLITE_PRIVATE int sqlite3WalkExprList(Walker*, ExprList*); SQLITE_PRIVATE int sqlite3WalkSelect(Walker*, Select*); SQLITE_PRIVATE int sqlite3WalkSelectExpr(Walker*, Select*); @@ -19541,6 +20445,16 @@ struct CteUse { }; +/* Client data associated with sqlite3_set_clientdata() and +** sqlite3_get_clientdata(). +*/ +struct DbClientData { + DbClientData *pNext; /* Next in a linked list */ + void *pData; /* The data */ + void (*xDestructor)(void*); /* Destructor. 
Might be NULL */ + char zName[1]; /* Name of this client data. MUST BE LAST */ +}; + #ifdef SQLITE_DEBUG /* ** An instance of the TreeView object is used for printing the content of @@ -19710,6 +20624,8 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno); # define sqlite3Isxdigit(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x08) # define sqlite3Tolower(x) (sqlite3UpperToLower[(unsigned char)(x)]) # define sqlite3Isquote(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x80) +# define sqlite3JsonId1(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x42) +# define sqlite3JsonId2(x) (sqlite3CtypeMap[(unsigned char)(x)]&0x46) #else # define sqlite3Toupper(x) toupper((unsigned char)(x)) # define sqlite3Isspace(x) isspace((unsigned char)(x)) @@ -19719,6 +20635,8 @@ SQLITE_PRIVATE int sqlite3CorruptPgnoError(int,Pgno); # define sqlite3Isxdigit(x) isxdigit((unsigned char)(x)) # define sqlite3Tolower(x) tolower((unsigned char)(x)) # define sqlite3Isquote(x) ((x)=='"'||(x)=='\''||(x)=='['||(x)=='`') +# define sqlite3JsonId1(x) (sqlite3IsIdChar(x)&&(x)<'0') +# define sqlite3JsonId2(x) sqlite3IsIdChar(x) #endif SQLITE_PRIVATE int sqlite3IsIdChar(u8); @@ -19746,6 +20664,7 @@ SQLITE_PRIVATE void *sqlite3DbReallocOrFree(sqlite3 *, void *, u64); SQLITE_PRIVATE void *sqlite3DbRealloc(sqlite3 *, void *, u64); SQLITE_PRIVATE void sqlite3DbFree(sqlite3*, void*); SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3*, void*); +SQLITE_PRIVATE void sqlite3DbNNFreeNN(sqlite3*, void*); SQLITE_PRIVATE int sqlite3MallocSize(const void*); SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3*, const void*); SQLITE_PRIVATE void *sqlite3PageMalloc(int); @@ -19766,12 +20685,14 @@ SQLITE_PRIVATE int sqlite3HeapNearlyFull(void); */ #ifdef SQLITE_USE_ALLOCA # define sqlite3StackAllocRaw(D,N) alloca(N) -# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N) +# define sqlite3StackAllocRawNN(D,N) alloca(N) # define sqlite3StackFree(D,P) +# define sqlite3StackFreeNN(D,P) #else # define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N) -# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N) +# define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N) # define sqlite3StackFree(D,P) sqlite3DbFree(D,P) +# define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P) #endif /* Do not allow both MEMSYS5 and MEMSYS3 to be defined together. If they @@ -19819,10 +20740,13 @@ SQLITE_PRIVATE void sqlite3MutexWarnOnContention(sqlite3_mutex*); # define EXP754 (((u64)0x7ff)<<52) # define MAN754 ((((u64)1)<<52)-1) # define IsNaN(X) (((X)&EXP754)==EXP754 && ((X)&MAN754)!=0) +# define IsOvfl(X) (((X)&EXP754)==EXP754) SQLITE_PRIVATE int sqlite3IsNaN(double); +SQLITE_PRIVATE int sqlite3IsOverflow(double); #else -# define IsNaN(X) 0 -# define sqlite3IsNaN(X) 0 +# define IsNaN(X) 0 +# define sqlite3IsNaN(X) 0 +# define sqlite3IsOVerflow(X) 0 #endif /* @@ -19835,6 +20759,20 @@ struct PrintfArguments { sqlite3_value **apArg; /* The argument values */ }; +/* +** An instance of this object receives the decoding of a floating point +** value into an approximate decimal representation. 
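+**
+** For example (illustrative, in terms of the fields declared below):
+** decoding the value 1234.5 should produce sign=='+', isSpecial==0,
+** and the significant digits "12345" with iDP==4, i.e. the decimal
+** point falls after the fourth significant digit.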
+*/ +struct FpDecode { + char sign; /* '+' or '-' */ + char isSpecial; /* 1: Infinity 2: NaN */ + int n; /* Significant digits in the decode */ + int iDP; /* Location of the decimal point */ + char *z; /* Start of significant digits */ + char zBuf[24]; /* Storage for significant digits */ +}; + +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode*,double,int,int); SQLITE_PRIVATE char *sqlite3MPrintf(sqlite3*,const char*, ...); SQLITE_PRIVATE char *sqlite3VMPrintf(sqlite3*,const char*, va_list); #if defined(SQLITE_DEBUG) || defined(SQLITE_HAVE_OS_TRACE) @@ -19894,6 +20832,7 @@ SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window*); #endif SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*); +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse*); SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...); SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int); SQLITE_PRIVATE void sqlite3Dequote(char*); @@ -19908,6 +20847,10 @@ SQLITE_PRIVATE void sqlite3ReleaseTempReg(Parse*,int); SQLITE_PRIVATE int sqlite3GetTempRange(Parse*,int); SQLITE_PRIVATE void sqlite3ReleaseTempRange(Parse*,int,int); SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse*); +SQLITE_PRIVATE void sqlite3TouchRegister(Parse*,int); +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse*,int); +#endif #ifdef SQLITE_DEBUG SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int); #endif @@ -19919,9 +20862,12 @@ SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*); SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse*,Expr*, Expr*); SQLITE_PRIVATE Expr *sqlite3ExprSimplifiedAndOr(Expr*); SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, const Token*, int); +SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy(Parse*,Expr*,ExprList*); +SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse*,Expr*); SQLITE_PRIVATE void sqlite3ExprFunctionUsable(Parse*,const Expr*,const FuncDef*); SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse*, Expr*, u32); SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3*, Expr*); +SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse*, Expr*); SQLITE_PRIVATE void sqlite3ExprUnmapAndDelete(Parse*, Expr*); SQLITE_PRIVATE ExprList *sqlite3ExprListAppend(Parse*,ExprList*,Expr*); @@ -19931,6 +20877,7 @@ SQLITE_PRIVATE void sqlite3ExprListSetSortOrder(ExprList*,int,int); SQLITE_PRIVATE void sqlite3ExprListSetName(Parse*,ExprList*,const Token*,int); SQLITE_PRIVATE void sqlite3ExprListSetSpan(Parse*,ExprList*,const char*,const char*); SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3*, ExprList*); +SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*); SQLITE_PRIVATE int sqlite3IndexHasDuplicateRootPage(Index*); SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**); @@ -19951,7 +20898,7 @@ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column*); SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*); SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect); SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**); -SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char); +SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(Parse*,Table*,Select*,char); SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char); SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int); SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*); @@ -20027,6 
+20974,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask); SQLITE_PRIVATE void sqlite3DropTable(Parse*, SrcList*, int, int); SQLITE_PRIVATE void sqlite3CodeDropTable(Parse*, Table*, int, int); SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3*, Table*); +SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3*, void*); SQLITE_PRIVATE void sqlite3FreeIndex(sqlite3*, Index*); #ifndef SQLITE_OMIT_AUTOINCREMENT SQLITE_PRIVATE void sqlite3AutoincrementBegin(Parse *pParse); @@ -20063,8 +21011,9 @@ SQLITE_PRIVATE int sqlite3Select(Parse*, Select*, SelectDest*); SQLITE_PRIVATE Select *sqlite3SelectNew(Parse*,ExprList*,SrcList*,Expr*,ExprList*, Expr*,ExprList*,u32,Expr*); SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3*, Select*); +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse*, SrcList*); -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, int); +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse*, Table*, Trigger*); SQLITE_PRIVATE void sqlite3OpenTable(Parse*, int iCur, int iDb, Table*, int); #if defined(SQLITE_ENABLE_UPDATE_DELETE_LIMIT) && !defined(SQLITE_OMIT_SUBQUERY) SQLITE_PRIVATE Expr *sqlite3LimitWhere(Parse*,SrcList*,Expr*,ExprList*,Expr*,char*); @@ -20126,7 +21075,7 @@ SQLITE_PRIVATE int sqlite3ExprCompare(const Parse*,const Expr*,const Expr*, int) SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr*,Expr*,int); SQLITE_PRIVATE int sqlite3ExprListCompare(const ExprList*,const ExprList*, int); SQLITE_PRIVATE int sqlite3ExprImpliesExpr(const Parse*,const Expr*,const Expr*, int); -SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int); +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr*,int,int); SQLITE_PRIVATE void sqlite3AggInfoPersistWalkerInit(Walker*,Parse*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggregates(NameContext*, Expr*); SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*); @@ -20153,7 +21102,7 @@ SQLITE_PRIVATE int sqlite3ExprIsConstantNotJoin(Expr*); SQLITE_PRIVATE int sqlite3ExprIsConstantOrFunction(Expr*, u8); SQLITE_PRIVATE int sqlite3ExprIsConstantOrGroupBy(Parse*, Expr*, ExprList*); SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr*,int); -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr*,const SrcItem*); +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint(Expr*,const SrcList*,int); #ifdef SQLITE_ENABLE_CURSOR_HINTS SQLITE_PRIVATE int sqlite3ExprContainsSubquery(Expr*); #endif @@ -20161,6 +21110,7 @@ SQLITE_PRIVATE int sqlite3ExprIsInteger(const Expr*, int*); SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr*); SQLITE_PRIVATE int sqlite3ExprNeedsNoAffinityChange(const Expr*, char); SQLITE_PRIVATE int sqlite3IsRowid(const char*); +SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab); SQLITE_PRIVATE void sqlite3GenerateRowDelete( Parse*,Table*,Trigger*,int,int,int,i16,u8,u8,u8,int); SQLITE_PRIVATE void sqlite3GenerateRowIndexDelete(Parse*, Table*, int, int, int*, int); @@ -20275,8 +21225,10 @@ SQLITE_PRIVATE int sqlite3FixSrcList(DbFixer*, SrcList*); SQLITE_PRIVATE int sqlite3FixSelect(DbFixer*, Select*); SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*); SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*); + SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64); -SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*); +SQLITE_PRIVATE i64 sqlite3RealToI64(double); +SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*); SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8); SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*); SQLITE_PRIVATE int 
sqlite3GetUInt32(const char*, u32*); @@ -20286,6 +21238,7 @@ SQLITE_PRIVATE int sqlite3Utf16ByteLen(const void *pData, int nChar); #endif SQLITE_PRIVATE int sqlite3Utf8CharLen(const char *pData, int nByte); SQLITE_PRIVATE u32 sqlite3Utf8Read(const u8**); +SQLITE_PRIVATE int sqlite3Utf8ReadLimited(const u8*, int, u32*); SQLITE_PRIVATE LogEst sqlite3LogEst(u64); SQLITE_PRIVATE LogEst sqlite3LogEstAdd(LogEst,LogEst); SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double); @@ -20321,11 +21274,13 @@ SQLITE_PRIVATE int sqlite3VarintLen(u64 v); SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3*, Index*); +SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3*,const Table*); SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe*, Table*, int); SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2); SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity); SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table*,int); SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr); +SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr); SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8); SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*); SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...); @@ -20342,6 +21297,9 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int); #ifndef SQLITE_OMIT_DESERIALIZE SQLITE_PRIVATE int sqlite3MemdbInit(void); +SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs*); +#else +# define sqlite3IsMemdb(X) 0 #endif SQLITE_PRIVATE const char *sqlite3ErrStr(int); @@ -20373,6 +21331,7 @@ SQLITE_PRIVATE void sqlite3FileSuffix3(const char*, char*); SQLITE_PRIVATE u8 sqlite3GetBoolean(const char *z,u8); SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value*, u8); +SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value*, void(*)(void*)); SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value*, u8); SQLITE_PRIVATE void sqlite3ValueSetStr(sqlite3_value*, int, const void *,u8, void(*)(void*)); @@ -20392,7 +21351,6 @@ SQLITE_PRIVATE const unsigned char sqlite3OpcodeProperty[]; SQLITE_PRIVATE const char sqlite3StrBINARY[]; SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[]; SQLITE_PRIVATE const char sqlite3StdTypeAffinity[]; -SQLITE_PRIVATE const char sqlite3StdTypeMap[]; SQLITE_PRIVATE const char *sqlite3StdType[]; SQLITE_PRIVATE const unsigned char sqlite3UpperToLower[]; SQLITE_PRIVATE const unsigned char *sqlite3aLTb; @@ -20425,7 +21383,8 @@ SQLITE_PRIVATE int sqlite3MatchEName( const struct ExprList_item*, const char*, const char*, - const char* + const char*, + int* ); SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr*); SQLITE_PRIVATE u8 sqlite3StrIHash(const char*); @@ -20481,8 +21440,13 @@ SQLITE_PRIVATE void sqlite3OomClear(sqlite3*); SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int); SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *); +SQLITE_PRIVATE char *sqlite3RCStrRef(char*); +SQLITE_PRIVATE void sqlite3RCStrUnref(void*); +SQLITE_PRIVATE char *sqlite3RCStrNew(u64); +SQLITE_PRIVATE char *sqlite3RCStrResize(char*,u64); + SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int); -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, int); +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64); SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*); SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum*, u8); SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context*,StrAccum*); @@ -20596,10 +21560,7 @@ SQLITE_PRIVATE int sqlite3VtabCallDestroy(sqlite3*, int, const char 
*); SQLITE_PRIVATE int sqlite3VtabBegin(sqlite3 *, VTable *); SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction(sqlite3 *,FuncDef*, int nArg, Expr*); -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info*); -#endif +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse*); SQLITE_PRIVATE sqlite3_int64 sqlite3StmtCurrentTime(sqlite3_context*); SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe*, const char*, int); SQLITE_PRIVATE int sqlite3TransferBindings(sqlite3_stmt *, sqlite3_stmt *); @@ -20624,6 +21585,7 @@ SQLITE_PRIVATE Cte *sqlite3CteNew(Parse*,Token*,ExprList*,Select*,u8); SQLITE_PRIVATE void sqlite3CteDelete(sqlite3*,Cte*); SQLITE_PRIVATE With *sqlite3WithAdd(Parse*,With*,Cte*); SQLITE_PRIVATE void sqlite3WithDelete(sqlite3*,With*); +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3*,void*); SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); #else # define sqlite3CteNew(P,T,E,S) ((void*)0) @@ -20636,7 +21598,7 @@ SQLITE_PRIVATE With *sqlite3WithPush(Parse*, With*, u8); SQLITE_PRIVATE Upsert *sqlite3UpsertNew(sqlite3*,ExprList*,Expr*,ExprList*,Expr*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDelete(sqlite3*,Upsert*); SQLITE_PRIVATE Upsert *sqlite3UpsertDup(sqlite3*,Upsert*); -SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*); +SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget(Parse*,SrcList*,Upsert*,Upsert*); SQLITE_PRIVATE void sqlite3UpsertDoUpdate(Parse*,Upsert*,Table*,Index*,int); SQLITE_PRIVATE Upsert *sqlite3UpsertOfIndex(Upsert*,Index*); SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert*); @@ -20735,6 +21697,7 @@ SQLITE_PRIVATE int sqlite3ExprCheckHeight(Parse*, int); #define sqlite3SelectExprHeight(x) 0 #define sqlite3ExprCheckHeight(x,y) #endif +SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr*,int); SQLITE_PRIVATE u32 sqlite3Get4byte(const u8*); SQLITE_PRIVATE void sqlite3Put4byte(u8*, u32); @@ -20836,6 +21799,22 @@ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*); SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt); #endif +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) +SQLITE_PRIVATE int sqlite3KvvfsInit(void); +#endif + +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +SQLITE_PRIVATE sqlite3_uint64 sqlite3Hwtime(void); +#endif + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +# define IS_STMT_SCANSTATUS(db) (db->flags & SQLITE_StmtScanStatus) +#else +# define IS_STMT_SCANSTATUS(db) 0 +#endif + #endif /* SQLITEINT_H */ /************** End of sqliteInt.h *******************************************/ @@ -20877,101 +21856,6 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt); */ #ifdef SQLITE_PERFORMANCE_TRACE -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/************** Include hwtime.h in the middle of os_common.h ****************/ -/************** Begin file hwtime.h ******************************************/ -/* -** 2008 May 27 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -****************************************************************************** -** -** This file contains inline asm code for retrieving "high-performance" -** counters for x86 and x86_64 class CPUs. -*/ -#ifndef SQLITE_HWTIME_H -#define SQLITE_HWTIME_H - -/* -** The following routine only works on pentium-class (or newer) processors. -** It uses the RDTSC opcode to read the cycle count value out of the -** processor and returns that value. This can be used for high-res -** profiling. -*/ -#if !defined(__STRICT_ANSI__) && \ - (defined(__GNUC__) || defined(_MSC_VER)) && \ - (defined(i386) || defined(__i386__) || defined(_M_IX86)) - - #if defined(__GNUC__) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned int lo, hi; - __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); - return (sqlite_uint64)hi << 32 | lo; - } - - #elif defined(_MSC_VER) - - __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ - __asm { - rdtsc - ret ; return value at EDX:EAX - } - } - - #endif - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long val; - __asm__ __volatile__ ("rdtsc" : "=A" (val)); - return val; - } - -#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) - - __inline__ sqlite_uint64 sqlite3Hwtime(void){ - unsigned long long retval; - unsigned long junk; - __asm__ __volatile__ ("\n\ - 1: mftbu %1\n\ - mftb %L0\n\ - mftbu %0\n\ - cmpw %0,%1\n\ - bne 1b" - : "=r" (retval), "=r" (junk)); - return retval; - } - -#else - - /* - ** asm() is needed for hardware timing support. Without asm(), - ** disable the sqlite3Hwtime() routine. - ** - ** sqlite3Hwtime() is only used for some obscure debugging - ** and analysis configurations, not in any deliverable, so this - ** should not be a great loss. 
- */ -SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } - -#endif - -#endif /* !defined(SQLITE_HWTIME_H) */ - -/************** End of hwtime.h **********************************************/ -/************** Continuing where we left off in os_common.h ******************/ - static sqlite_uint64 g_start; static sqlite_uint64 g_elapsed; #define TIMER_START g_start=sqlite3Hwtime() @@ -21067,7 +21951,7 @@ SQLITE_API extern int sqlite3_open_file_count; ** autoconf-based build */ #if defined(_HAVE_SQLITE_CONFIG_H) && !defined(SQLITECONFIG_H) -/* #include "config.h" */ +/* #include "sqlite_cfg.h" */ #define SQLITECONFIG_H 1 #endif @@ -21099,14 +21983,14 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_4_BYTE_ALIGNED_MALLOC "4_BYTE_ALIGNED_MALLOC", #endif -#ifdef SQLITE_64BIT_STATS - "64BIT_STATS", -#endif #ifdef SQLITE_ALLOW_COVERING_INDEX_SCAN # if SQLITE_ALLOW_COVERING_INDEX_SCAN != 1 "ALLOW_COVERING_INDEX_SCAN=" CTIMEOPT_VAL(SQLITE_ALLOW_COVERING_INDEX_SCAN), # endif #endif +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + "ALLOW_ROWID_IN_VIEW", +#endif #ifdef SQLITE_ALLOW_URI_AUTHORITY "ALLOW_URI_AUTHORITY", #endif @@ -21232,6 +22116,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_DISABLE_SKIPAHEAD_DISTINCT "DISABLE_SKIPAHEAD_DISTINCT", #endif +#ifdef SQLITE_DQS + "DQS=" CTIMEOPT_VAL(SQLITE_DQS), +#endif #ifdef SQLITE_ENABLE_8_3_NAMES "ENABLE_8_3_NAMES=" CTIMEOPT_VAL(SQLITE_ENABLE_8_3_NAMES), #endif @@ -21394,6 +22281,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS "EXPLAIN_ESTIMATED_ROWS", #endif +#ifdef SQLITE_EXTRA_AUTOEXT + "EXTRA_AUTOEXT=" CTIMEOPT_VAL(SQLITE_EXTRA_AUTOEXT), +#endif #ifdef SQLITE_EXTRA_IFNULLROW "EXTRA_IFNULLROW", #endif @@ -21440,6 +22330,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_INTEGRITY_CHECK_ERROR_MAX "INTEGRITY_CHECK_ERROR_MAX=" CTIMEOPT_VAL(SQLITE_INTEGRITY_CHECK_ERROR_MAX), #endif +#ifdef SQLITE_LEGACY_JSON_VALID + "LEGACY_JSON_VALID", +#endif #ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS "LIKE_DOESNT_MATCH_BLOBS", #endif @@ -21677,6 +22570,9 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS "OMIT_SCHEMA_VERSION_PRAGMAS", #endif +#ifdef SQLITE_OMIT_SEH + "OMIT_SEH", +#endif #ifdef SQLITE_OMIT_SHARED_CACHE "OMIT_SHARED_CACHE", #endif @@ -21727,9 +22623,6 @@ static const char * const sqlite3azCompileOpt[] = { #ifdef SQLITE_OMIT_XFER_OPT "OMIT_XFER_OPT", #endif -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - "PCACHE_SEPARATE_HEADER", -#endif #ifdef SQLITE_PERFORMANCE_TRACE "PERFORMANCE_TRACE", #endif @@ -21931,7 +22824,7 @@ SQLITE_PRIVATE const unsigned char *sqlite3aGTb = &sqlite3UpperToLower[256+12-OP ** isalnum() 0x06 ** isxdigit() 0x08 ** toupper() 0x20 -** SQLite identifier character 0x40 +** SQLite identifier character 0x40 $, _, or non-ascii ** Quote character 0x80 ** ** Bit 0x20 is set if the mapped character requires translation to upper @@ -22086,6 +22979,10 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */ 0, /* bSmallMalloc */ 1, /* bExtraSchemaChecks */ + sizeof(LONGDOUBLE_TYPE)>8, /* bUseLongDouble */ +#ifdef SQLITE_DEBUG + 0, /* bJsonSelfcheck */ +#endif 0x7ffffffe, /* mxStrlen */ 0, /* neverCorrupt */ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */ @@ -22127,6 +23024,9 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { #endif #ifndef SQLITE_UNTESTABLE 0, /* xTestCallback */ +#endif +#ifdef 
SQLITE_ALLOW_ROWID_IN_VIEW + 0, /* mNoVisibleRowid. 0 == allow rowid-in-view */ #endif 0, /* bLocaltimeFault */ 0, /* xAltLocaltime */ @@ -22134,7 +23034,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = { SQLITE_DEFAULT_SORTERREF_SIZE, /* szSorterRef */ 0, /* iPrngSeed */ #ifdef SQLITE_DEBUG - {0,0,0,0,0,0} /* aTune */ + {0,0,0,0,0,0}, /* aTune */ #endif }; @@ -22218,10 +23118,6 @@ SQLITE_PRIVATE const char sqlite3StrBINARY[] = "BINARY"; ** ** sqlite3StdTypeAffinity[] The affinity associated with each entry ** in sqlite3StdType[]. -** -** sqlite3StdTypeMap[] The type value (as returned from -** sqlite3_column_type() or sqlite3_value_type()) -** for each entry in sqlite3StdType[]. */ SQLITE_PRIVATE const unsigned char sqlite3StdTypeLen[] = { 3, 4, 3, 7, 4, 4 }; SQLITE_PRIVATE const char sqlite3StdTypeAffinity[] = { @@ -22232,14 +23128,6 @@ SQLITE_PRIVATE const char sqlite3StdTypeAffinity[] = { SQLITE_AFF_REAL, SQLITE_AFF_TEXT }; -SQLITE_PRIVATE const char sqlite3StdTypeMap[] = { - 0, - SQLITE_BLOB, - SQLITE_INTEGER, - SQLITE_INTEGER, - SQLITE_FLOAT, - SQLITE_TEXT -}; SQLITE_PRIVATE const char *sqlite3StdType[] = { "ANY", "BLOB", @@ -22327,6 +23215,9 @@ typedef struct VdbeSorter VdbeSorter; /* Elements of the linked list at Vdbe.pAuxData */ typedef struct AuxData AuxData; +/* A cache of large TEXT or BLOB values in a VdbeCursor */ +typedef struct VdbeTxtBlbCache VdbeTxtBlbCache; + /* Types of VDBE cursors */ #define CURTYPE_BTREE 0 #define CURTYPE_SORTER 1 @@ -22358,6 +23249,7 @@ struct VdbeCursor { Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */ Bool noReuse:1; /* OpenEphemeral may not reuse this cursor */ + Bool colCache:1; /* pCache pointer is initialized and non-NULL */ u16 seekHit; /* See the OP_SeekHit and OP_IfNoHope opcodes */ union { /* pBtx for isEphermeral. pAltMap otherwise */ Btree *pBtx; /* Separate file holding temporary table */ @@ -22398,6 +23290,7 @@ struct VdbeCursor { #ifdef SQLITE_ENABLE_COLUMN_USED_MASK u64 maskUsed; /* Mask of columns used by this cursor */ #endif + VdbeTxtBlbCache *pCache; /* Cache of large TEXT or BLOB values */ /* 2*nField extra array elements allocated for aType[], beyond the one ** static element declared in the structure. nField total array slots for @@ -22410,12 +23303,25 @@ struct VdbeCursor { #define IsNullCursor(P) \ ((P)->eCurType==CURTYPE_PSEUDO && (P)->nullRow && (P)->seekResult==0) - /* ** A value for VdbeCursor.cacheStatus that means the cache is always invalid. */ #define CACHE_STALE 0 +/* +** Large TEXT or BLOB values can be slow to load, so we want to avoid +** loading them more than once. For that reason, large TEXT and BLOB values +** can be stored in a cache defined by this object, and attached to the +** VdbeCursor using the pCache field. 
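+**
+** A sketch of the intended reuse test (illustrative; inferred from the
+** field comments below, not quoted from the implementation): a cached
+** value may only be trusted while
+**
+**     pCache->cacheStatus==Vdbe.cacheCtr
+**      && pCache->colCacheCtr is still current
+**      && pCache->iCol matches the column being read
+**
+** and otherwise the value must be reloaded from the btree cursor.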
+*/ +struct VdbeTxtBlbCache { + char *pCValue; /* A RCStr buffer to hold the value */ + i64 iOffset; /* File offset of the row being cached */ + int iCol; /* Column for which the cache is valid */ + u32 cacheStatus; /* Vdbe.cacheCtr value */ + u32 colCacheCtr; /* Column cache counter */ +}; + /* ** When a sub-program is executed (OP_Program), a structure of this type ** is allocated to store the current value of the program counter, as @@ -22442,7 +23348,6 @@ struct VdbeFrame { Vdbe *v; /* VM this frame belongs to */ VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */ Op *aOp; /* Program instructions for parent frame */ - i64 *anExec; /* Event counters from parent frame */ Mem *aMem; /* Array of memory cells for parent frame */ VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */ u8 *aOnce; /* Bitmask used by OP_Once */ @@ -22658,10 +23563,19 @@ typedef unsigned bft; /* Bit Field Type */ /* The ScanStatus object holds a single value for the ** sqlite3_stmt_scanstatus() interface. +** +** aAddrRange[]: +** This array is used by ScanStatus elements associated with EQP +** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is +** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[] +** values should be summed to calculate the NCYCLE value. Each pair of +** integer addresses is a start and end address (both inclusive) for a range +** instructions. A start value of 0 indicates an empty range. */ typedef struct ScanStatus ScanStatus; struct ScanStatus { int addrExplain; /* OP_Explain for loop */ + int aAddrRange[6]; int addrLoop; /* Address of "loops" counter */ int addrVisit; /* Address of "rows visited" counter */ int iSelectID; /* The "Select-ID" for this loop */ @@ -22691,7 +23605,7 @@ struct DblquoteStr { */ struct Vdbe { sqlite3 *db; /* The database connection that owns this statement */ - Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */ + Vdbe **ppVPrev,*pVNext; /* Linked list of VDBEs with the same Vdbe.db */ Parse *pParse; /* Parsing context used to create this Vdbe */ ynVar nVar; /* Number of entries in aVar[] */ int nMem; /* Number of memory locations currently allocated */ @@ -22717,7 +23631,7 @@ struct Vdbe { int nOp; /* Number of instructions in the program */ int nOpAlloc; /* Slots allocated for aOp[] */ Mem *aColName; /* Column names to return */ - Mem *pResultSet; /* Pointer to an array of results */ + Mem *pResultRow; /* Current output row */ char *zErrMsg; /* Error message written here */ VList *pVList; /* Name of variables */ #ifndef SQLITE_OMIT_TRACE @@ -22728,16 +23642,18 @@ struct Vdbe { u32 nWrite; /* Number of write operations that have occurred */ #endif u16 nResColumn; /* Number of columns in one row of the result set */ + u16 nResAlloc; /* Column slots allocated to aColName[] */ u8 errorAction; /* Recovery action to do in case of an error */ u8 minWriteFileFormat; /* Minimum file format for writable database files */ u8 prepFlags; /* SQLITE_PREPARE_* flags */ u8 eVdbeState; /* On of the VDBE_*_STATE values */ bft expired:2; /* 1: recompile VM immediately 2: when convenient */ - bft explain:2; /* True if EXPLAIN present on SQL command */ + bft explain:2; /* 0: normal, 1: EXPLAIN, 2: EXPLAIN QUERY PLAN */ bft changeCntOn:1; /* True to update the change-counter */ bft usesStmtJournal:1; /* True if uses a statement journal */ bft readOnly:1; /* True for statements that do not write */ bft bIsReader:1; /* True for statements that read */ + bft haveEqpOps:1; /* Bytecode supports EXPLAIN 
QUERY PLAN */ yDbMask btreeMask; /* Bitmask of db->aDb[] entries referenced */ yDbMask lockMask; /* Subset of btreeMask that requires a lock */ u32 aCounter[9]; /* Counters used by sqlite3_stmt_status() */ @@ -22754,7 +23670,6 @@ struct Vdbe { SubProgram *pProgram; /* Linked list of all sub-programs used by VM */ AuxData *pAuxData; /* Linked list of auxdata allocations */ #ifdef SQLITE_ENABLE_STMT_SCANSTATUS - i64 *anExec; /* Number of times each op has been executed */ int nScan; /* Entries in aScan[] */ ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */ #endif @@ -22785,7 +23700,7 @@ struct PreUpdate { i64 iKey1; /* First key value passed to hook */ i64 iKey2; /* Second key value passed to hook */ Mem *aNew; /* Array of new.* values */ - Table *pTab; /* Schema object being upated */ + Table *pTab; /* Schema object being updated */ Index *pPk; /* PK index if pTab is WITHOUT ROWID */ }; @@ -22875,6 +23790,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetZeroBlob(Mem*,int); SQLITE_PRIVATE int sqlite3VdbeMemIsRowSet(const Mem*); #endif SQLITE_PRIVATE int sqlite3VdbeMemSetRowSet(Mem*); +SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemMakeWriteable(Mem*); SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem*, u8, u8); SQLITE_PRIVATE int sqlite3IntFloatCompare(i64,double); @@ -22921,6 +23837,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *); SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *); SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *); +SQLITE_PRIVATE void sqlite3VdbeValueListFree(void*); + #ifdef SQLITE_DEBUG SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe*, VdbeCursor*); SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe*); @@ -23249,6 +24167,8 @@ SQLITE_API int sqlite3_db_status( sqlite3BtreeEnterAll(db); db->pnBytesFreed = &nByte; + assert( db->lookaside.pEnd==db->lookaside.pTrueEnd ); + db->lookaside.pEnd = db->lookaside.pStart; for(i=0; inDb; i++){ Schema *pSchema = db->aDb[i].pSchema; if( ALWAYS(pSchema!=0) ){ @@ -23274,6 +24194,7 @@ SQLITE_API int sqlite3_db_status( } } db->pnBytesFreed = 0; + db->lookaside.pEnd = db->lookaside.pTrueEnd; sqlite3BtreeLeaveAll(db); *pHighwater = 0; @@ -23291,9 +24212,12 @@ SQLITE_API int sqlite3_db_status( int nByte = 0; /* Used to accumulate return value */ db->pnBytesFreed = &nByte; - for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pNext){ + assert( db->lookaside.pEnd==db->lookaside.pTrueEnd ); + db->lookaside.pEnd = db->lookaside.pStart; + for(pVdbe=db->pVdbe; pVdbe; pVdbe=pVdbe->pVNext){ sqlite3VdbeDelete(pVdbe); } + db->lookaside.pEnd = db->lookaside.pTrueEnd; db->pnBytesFreed = 0; *pHighwater = 0; /* IMP: R-64479-57858 */ @@ -23314,7 +24238,7 @@ SQLITE_API int sqlite3_db_status( case SQLITE_DBSTATUS_CACHE_MISS: case SQLITE_DBSTATUS_CACHE_WRITE:{ int i; - int nRet = 0; + u64 nRet = 0; assert( SQLITE_DBSTATUS_CACHE_MISS==SQLITE_DBSTATUS_CACHE_HIT+1 ); assert( SQLITE_DBSTATUS_CACHE_WRITE==SQLITE_DBSTATUS_CACHE_HIT+2 ); @@ -23327,7 +24251,7 @@ SQLITE_API int sqlite3_db_status( *pHighwater = 0; /* IMP: R-42420-56072 */ /* IMP: R-54100-20147 */ /* IMP: R-29431-39229 */ - *pCurrent = nRet; + *pCurrent = (int)nRet & 0x7fffffff; break; } @@ -23430,6 +24354,7 @@ struct DateTime { char validTZ; /* True (1) if tz is valid */ char tzSet; /* Timezone was set explicitly */ char isError; /* An overflow has occurred */ + char useSubsec; /* Display subsecond precision */ }; @@ -23462,8 +24387,8 @@ struct DateTime { */ static int 
getDigits(const char *zDate, const char *zFormat, ...){ /* The aMx[] array translates the 3rd character of each format - ** spec into a max size: a b c d e f */ - static const u16 aMx[] = { 12, 14, 24, 31, 59, 9999 }; + ** spec into a max size: a b c d e f */ + static const u16 aMx[] = { 12, 14, 24, 31, 59, 14712 }; va_list ap; int cnt = 0; char nextC; @@ -23629,7 +24554,7 @@ static void computeJD(DateTime *p){ p->iJD = (sqlite3_int64)((X1 + X2 + D + B - 1524.5 ) * 86400000); p->validJD = 1; if( p->validHMS ){ - p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000); + p->iJD += p->h*3600000 + p->m*60000 + (sqlite3_int64)(p->s*1000 + 0.5); if( p->validTZ ){ p->iJD -= p->tz*60000; p->validYMD = 0; @@ -23744,6 +24669,11 @@ static int parseDateOrTime( }else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8)>0 ){ setRawDateNumber(p, r); return 0; + }else if( (sqlite3StrICmp(zDate,"subsec")==0 + || sqlite3StrICmp(zDate,"subsecond")==0) + && sqlite3NotPureFunc(context) ){ + p->useSubsec = 1; + return setDateTimeToCurrent(context, p); } return 1; } @@ -23799,17 +24729,14 @@ static void computeYMD(DateTime *p){ ** Compute the Hour, Minute, and Seconds from the julian day number. */ static void computeHMS(DateTime *p){ - int s; + int day_ms, day_min; /* milliseconds, minutes into the day */ if( p->validHMS ) return; computeJD(p); - s = (int)((p->iJD + 43200000) % 86400000); - p->s = s/1000.0; - s = (int)p->s; - p->s -= s; - p->h = s/3600; - s -= p->h*3600; - p->m = s/60; - p->s += s - p->m*60; + day_ms = (int)((p->iJD + 43200000) % 86400000); + p->s = (day_ms % 60000)/1000.0; + day_min = day_ms/60000; + p->m = day_min % 60; + p->h = day_min / 60; p->rawS = 0; p->validHMS = 1; } @@ -23988,6 +24915,25 @@ static const struct { { 4, "year", 14713.0, 31536000.0 }, }; +/* +** If the DateTime p is raw number, try to figure out if it is +** a julian day number of a unix timestamp. Set the p value +** appropriately. +*/ +static void autoAdjustDate(DateTime *p){ + if( !p->rawS || p->validJD ){ + p->rawS = 0; + }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ + && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ + ){ + double r = p->s*1000.0 + 210866760000000.0; + clearYMD_HMS_TZ(p); + p->iJD = (sqlite3_int64)(r + 0.5); + p->validJD = 1; + p->rawS = 0; + } +} + /* ** Process a modifier to a date-time stamp. 
The modifiers are ** as follows: @@ -24031,19 +24977,8 @@ static int parseModifier( */ if( sqlite3_stricmp(z, "auto")==0 ){ if( idx>1 ) return 1; /* IMP: R-33611-57934 */ - if( !p->rawS || p->validJD ){ - rc = 0; - p->rawS = 0; - }else if( p->s>=-21086676*(i64)10000 /* -4713-11-24 12:00:00 */ - && p->s<=(25340230*(i64)10000)+799 /* 9999-12-31 23:59:59 */ - ){ - r = p->s*1000.0 + 210866760000000.0; - clearYMD_HMS_TZ(p); - p->iJD = (sqlite3_int64)(r + 0.5); - p->validJD = 1; - p->rawS = 0; - rc = 0; - } + autoAdjustDate(p); + rc = 0; } break; } @@ -24102,7 +25037,7 @@ static int parseModifier( i64 iOrigJD; /* Original localtime */ i64 iGuess; /* Guess at the corresponding utc time */ int cnt = 0; /* Safety to prevent infinite loop */ - int iErr; /* Guess is off by this much */ + i64 iErr; /* Guess is off by this much */ computeJD(p); iGuess = iOrigJD = p->iJD; @@ -24138,7 +25073,7 @@ static int parseModifier( */ if( sqlite3_strnicmp(z, "weekday ", 8)==0 && sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8)>0 - && (n=(int)r)==r && n>=0 && r<7 ){ + && r>=0.0 && r<7.0 && (n=(int)r)==r ){ sqlite3_int64 Z; computeYMD_HMS(p); p->validTZ = 0; @@ -24158,8 +25093,22 @@ static int parseModifier( ** ** Move the date backwards to the beginning of the current day, ** or month or year. + ** + ** subsecond + ** subsec + ** + ** Show subsecond precision in the output of datetime() and + ** unixepoch() and strftime('%s'). */ - if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break; + if( sqlite3_strnicmp(z, "start of ", 9)!=0 ){ + if( sqlite3_stricmp(z, "subsec")==0 + || sqlite3_stricmp(z, "subsecond")==0 + ){ + p->useSubsec = 1; + rc = 0; + } + break; + } if( !p->validJD && !p->validYMD && !p->validHMS ) break; z += 9; computeYMD(p); @@ -24195,18 +25144,73 @@ static int parseModifier( case '9': { double rRounder; int i; - for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){} + int Y,M,D,h,m,x; + const char *z2 = z; + char z0 = z[0]; + for(n=1; z[n]; n++){ + if( z[n]==':' ) break; + if( sqlite3Isspace(z[n]) ) break; + if( z[n]=='-' ){ + if( n==5 && getDigits(&z[1], "40f", &Y)==1 ) break; + if( n==6 && getDigits(&z[1], "50f", &Y)==1 ) break; + } + } if( sqlite3AtoF(z, &r, n, SQLITE_UTF8)<=0 ){ - rc = 1; + assert( rc==1 ); break; } - if( z[n]==':' ){ + if( z[n]=='-' ){ + /* A modifier of the form (+|-)YYYY-MM-DD adds or subtracts the + ** specified number of years, months, and days. MM is limited to + ** the range 0-11 and DD is limited to 0-30. + */ + if( z0!='+' && z0!='-' ) break; /* Must start with +/- */ + if( n==5 ){ + if( getDigits(&z[1], "40f-20a-20d", &Y, &M, &D)!=3 ) break; + }else{ + assert( n==6 ); + if( getDigits(&z[1], "50f-20a-20d", &Y, &M, &D)!=3 ) break; + z++; + } + if( M>=12 ) break; /* M range 0..11 */ + if( D>=31 ) break; /* D range 0..30 */ + computeYMD_HMS(p); + p->validJD = 0; + if( z0=='-' ){ + p->Y -= Y; + p->M -= M; + D = -D; + }else{ + p->Y += Y; + p->M += M; + } + x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12; + p->Y += x; + p->M -= x*12; + computeJD(p); + p->validHMS = 0; + p->validYMD = 0; + p->iJD += (i64)D*86400000; + if( z[11]==0 ){ + rc = 0; + break; + } + if( sqlite3Isspace(z[11]) + && getDigits(&z[12], "20c:20e", &h, &m)==2 + ){ + z2 = &z[12]; + n = 2; + }else{ + break; + } + } + if( z2[n]==':' ){ /* A modifier of the form (+|-)HH:MM:SS.FFF adds (or subtracts) the ** specified number of hours, minutes, seconds, and fractional seconds ** to the time. The ".FFF" may be omitted. The ":SS.FFF" may be ** omitted. 
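**
** Illustrative examples (editorial, exercising the two branches above):
**
**     time('12:00', '+01:30')             should yield 13:30:00
**     date('2023-01-31', '+0000-01-00')   should yield 2023-03-03,
**                                         because the intermediate
**                                         2023-02-31 is normalized
**                                         when the JD is recomputed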
*/ - const char *z2 = z; + DateTime tx; sqlite3_int64 day; if( !sqlite3Isdigit(*z2) ) z2++; @@ -24216,7 +25220,7 @@ static int parseModifier( tx.iJD -= 43200000; day = tx.iJD/86400000; tx.iJD -= day*86400000; - if( z[0]=='-' ) tx.iJD = -tx.iJD; + if( z0=='-' ) tx.iJD = -tx.iJD; computeJD(p); clearYMD_HMS_TZ(p); p->iJD += tx.iJD; @@ -24232,7 +25236,7 @@ static int parseModifier( if( n>10 || n<3 ) break; if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--; computeJD(p); - rc = 1; + assert( rc==1 ); rRounder = r<0 ? -0.5 : +0.5; for(i=0; iM += (int)r; @@ -24317,6 +25320,12 @@ static int isDate( } computeJD(p); if( p->isError || !validJulianDay(p->iJD) ) return 1; + if( argc==1 && p->validYMD && p->D>28 ){ + /* Make sure a YYYY-MM-DD is normalized. + ** Example: 2023-02-31 -> 2023-03-03 */ + assert( p->validJD ); + p->validYMD = 0; + } return 0; } @@ -24357,7 +25366,11 @@ static void unixepochFunc( DateTime x; if( isDate(context, argc, argv, &x)==0 ){ computeJD(&x); - sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + if( x.useSubsec ){ + sqlite3_result_double(context, (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + sqlite3_result_int64(context, x.iJD/1000 - 21086676*(i64)10000); + } } } @@ -24373,8 +25386,8 @@ static void datetimeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int Y, s; - char zBuf[24]; + int Y, s, n; + char zBuf[32]; computeYMD_HMS(&x); Y = x.Y; if( Y<0 ) Y = -Y; @@ -24395,15 +25408,28 @@ static void datetimeFunc( zBuf[15] = '0' + (x.m/10)%10; zBuf[16] = '0' + (x.m)%10; zBuf[17] = ':'; - s = (int)x.s; - zBuf[18] = '0' + (s/10)%10; - zBuf[19] = '0' + (s)%10; - zBuf[20] = 0; + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[18] = '0' + (s/10000)%10; + zBuf[19] = '0' + (s/1000)%10; + zBuf[20] = '.'; + zBuf[21] = '0' + (s/100)%10; + zBuf[22] = '0' + (s/10)%10; + zBuf[23] = '0' + (s)%10; + zBuf[24] = 0; + n = 24; + }else{ + s = (int)x.s; + zBuf[18] = '0' + (s/10)%10; + zBuf[19] = '0' + (s)%10; + zBuf[20] = 0; + n = 20; + } if( x.Y<0 ){ zBuf[0] = '-'; - sqlite3_result_text(context, zBuf, 20, SQLITE_TRANSIENT); + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); }else{ - sqlite3_result_text(context, &zBuf[1], 19, SQLITE_TRANSIENT); + sqlite3_result_text(context, &zBuf[1], n-1, SQLITE_TRANSIENT); } } } @@ -24420,7 +25446,7 @@ static void timeFunc( ){ DateTime x; if( isDate(context, argc, argv, &x)==0 ){ - int s; + int s, n; char zBuf[16]; computeHMS(&x); zBuf[0] = '0' + (x.h/10)%10; @@ -24429,11 +25455,24 @@ static void timeFunc( zBuf[3] = '0' + (x.m/10)%10; zBuf[4] = '0' + (x.m)%10; zBuf[5] = ':'; - s = (int)x.s; - zBuf[6] = '0' + (s/10)%10; - zBuf[7] = '0' + (s)%10; - zBuf[8] = 0; - sqlite3_result_text(context, zBuf, 8, SQLITE_TRANSIENT); + if( x.useSubsec ){ + s = (int)(1000.0*x.s + 0.5); + zBuf[6] = '0' + (s/10000)%10; + zBuf[7] = '0' + (s/1000)%10; + zBuf[8] = '.'; + zBuf[9] = '0' + (s/100)%10; + zBuf[10] = '0' + (s/10)%10; + zBuf[11] = '0' + (s)%10; + zBuf[12] = 0; + n = 12; + }else{ + s = (int)x.s; + zBuf[6] = '0' + (s/10)%10; + zBuf[7] = '0' + (s)%10; + zBuf[8] = 0; + n = 8; + } + sqlite3_result_text(context, zBuf, n, SQLITE_TRANSIENT); } } @@ -24488,7 +25527,7 @@ static void dateFunc( ** %M minute 00-59 ** %s seconds since 1970-01-01 ** %S seconds 00-59 -** %w day of week 0-6 sunday==0 +** %w day of week 0-6 Sunday==0 ** %W week of year 00-53 ** %Y year 0000-9999 ** %% % @@ -24514,13 +25553,16 @@ static void strftimeFunc( computeJD(&x); computeYMD_HMS(&x); for(i=j=0; zFmt[i]; i++){ + char cf; if( zFmt[i]!='%' ) continue; if( 
j12 ) h -= 12; + if( h==0 ) h = 12; + sqlite3_str_appendf(&sRes, cf=='I' ? "%02d" : "%2d", h); break; } case 'W': /* Fall thru */ @@ -24542,7 +25597,7 @@ static void strftimeFunc( y.D = 1; computeJD(&y); nDay = (int)((x.iJD-y.iJD+43200000)/86400000); - if( zFmt[i]=='W' ){ + if( cf=='W' ){ int wd; /* 0=Monday, 1=Tuesday, ... 6=Sunday */ wd = (int)(((x.iJD+43200000)/86400000)%7); sqlite3_str_appendf(&sRes,"%02d",(nDay+7-wd)/7); @@ -24563,18 +25618,42 @@ static void strftimeFunc( sqlite3_str_appendf(&sRes,"%02d",x.m); break; } + case 'p': /* Fall thru */ + case 'P': { + if( x.h>=12 ){ + sqlite3_str_append(&sRes, cf=='p' ? "PM" : "pm", 2); + }else{ + sqlite3_str_append(&sRes, cf=='p' ? "AM" : "am", 2); + } + break; + } + case 'R': { + sqlite3_str_appendf(&sRes, "%02d:%02d", x.h, x.m); + break; + } case 's': { - i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); - sqlite3_str_appendf(&sRes,"%lld",iS); + if( x.useSubsec ){ + sqlite3_str_appendf(&sRes,"%.3f", + (x.iJD - 21086676*(i64)10000000)/1000.0); + }else{ + i64 iS = (i64)(x.iJD/1000 - 21086676*(i64)10000); + sqlite3_str_appendf(&sRes,"%lld",iS); + } break; } case 'S': { sqlite3_str_appendf(&sRes,"%02d",(int)x.s); break; } + case 'T': { + sqlite3_str_appendf(&sRes,"%02d:%02d:%02d", x.h, x.m, (int)x.s); + break; + } + case 'u': /* Fall thru */ case 'w': { - sqlite3_str_appendchar(&sRes, 1, - (char)(((x.iJD+129600000)/86400000) % 7) + '0'); + char c = (char)(((x.iJD+129600000)/86400000) % 7) + '0'; + if( c=='0' && cf=='u' ) c = '7'; + sqlite3_str_appendchar(&sRes, 1, c); break; } case 'Y': { @@ -24623,6 +25702,117 @@ static void cdateFunc( dateFunc(context, 0, 0); } +/* +** timediff(DATE1, DATE2) +** +** Return the amount of time that must be added to DATE2 in order to +** convert it into DATE1. The time difference format is: +** +** +YYYY-MM-DD HH:MM:SS.SSS +** +** The initial "+" becomes "-" if DATE1 occurs before DATE2. For +** date/time values A and B, the following invariant should hold: +** +** datetime(A) == datetime(B, timediff(A,B)) +** +** Both DATE arguments must be either a Julian day number, or an +** ISO-8601 string. Unix timestamps are not supported by this +** routine.
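+**
+** Illustrative examples (expected behavior, shown here only as a
+** sketch):
+**
+**     SELECT timediff('2023-02-15','2023-02-10');
+**                            -- '+0000-00-05 00:00:00.000'
+**     SELECT timediff('2023-02-10','2023-02-15');
+**                            -- '-0000-00-05 00:00:00.000'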
+*/ +static void timediffFunc( + sqlite3_context *context, + int NotUsed1, + sqlite3_value **argv +){ + char sign; + int Y, M; + DateTime d1, d2; + sqlite3_str sRes; + UNUSED_PARAMETER(NotUsed1); + if( isDate(context, 1, &argv[0], &d1) ) return; + if( isDate(context, 1, &argv[1], &d2) ) return; + computeYMD_HMS(&d1); + computeYMD_HMS(&d2); + if( d1.iJD>=d2.iJD ){ + sign = '+'; + Y = d1.Y - d2.Y; + if( Y ){ + d2.Y = d1.Y; + d2.validJD = 0; + computeJD(&d2); + } + M = d1.M - d2.M; + if( M<0 ){ + Y--; + M += 12; + } + if( M!=0 ){ + d2.M = d1.M; + d2.validJD = 0; + computeJD(&d2); + } + while( d1.iJDd2.iJD ){ + M--; + if( M<0 ){ + M = 11; + Y--; + } + d2.M++; + if( d2.M>12 ){ + d2.M = 1; + d2.Y++; + } + d2.validJD = 0; + computeJD(&d2); + } + d1.iJD = d2.iJD - d1.iJD; + d1.iJD += (u64)1486995408 * (u64)100000; + } + d1.validYMD = 0; + d1.validHMS = 0; + d1.validTZ = 0; + computeYMD_HMS(&d1); + sqlite3StrAccumInit(&sRes, 0, 0, 0, 100); + sqlite3_str_appendf(&sRes, "%c%04d-%02d-%02d %02d:%02d:%06.3f", + sign, Y, M, d1.D-1, d1.h, d1.m, d1.s); + sqlite3ResultStrAccum(context, &sRes); +} + + /* ** current_timestamp() ** @@ -24697,6 +25887,7 @@ SQLITE_PRIVATE void sqlite3RegisterDateTimeFunctions(void){ PURE_DATE(time, -1, 0, 0, timeFunc ), PURE_DATE(datetime, -1, 0, 0, datetimeFunc ), PURE_DATE(strftime, -1, 0, 0, strftimeFunc ), + PURE_DATE(timediff, 2, 0, 0, timediffFunc ), DFUNCTION(current_time, 0, 0, 0, ctimeFunc ), DFUNCTION(current_timestamp, 0, 0, 0, ctimestampFunc), DFUNCTION(current_date, 0, 0, 0, cdateFunc ), @@ -24819,9 +26010,11 @@ SQLITE_PRIVATE int sqlite3OsFileSize(sqlite3_file *id, i64 *pSize){ } SQLITE_PRIVATE int sqlite3OsLock(sqlite3_file *id, int lockType){ DO_OS_MALLOC_TEST(id); + assert( lockType>=SQLITE_LOCK_SHARED && lockType<=SQLITE_LOCK_EXCLUSIVE ); return id->pMethods->xLock(id, lockType); } SQLITE_PRIVATE int sqlite3OsUnlock(sqlite3_file *id, int lockType){ + assert( lockType==SQLITE_LOCK_NONE || lockType==SQLITE_LOCK_SHARED ); return id->pMethods->xUnlock(id, lockType); } SQLITE_PRIVATE int sqlite3OsCheckReservedLock(sqlite3_file *id, int *pResOut){ @@ -24848,7 +26041,7 @@ SQLITE_PRIVATE int sqlite3OsFileControl(sqlite3_file *id, int op, void *pArg){ /* Faults are not injected into COMMIT_PHASETWO because, assuming SQLite ** is using a regular VFS, it is called after the corresponding ** transaction has been committed. Injecting a fault at this point - ** confuses the test scripts - the COMMIT comand returns SQLITE_NOMEM + ** confuses the test scripts - the COMMIT command returns SQLITE_NOMEM ** but the transaction is committed anyway. ** ** The core must call OsFileControl() though, not OsFileControlHint(), @@ -24936,6 +26129,7 @@ SQLITE_PRIVATE int sqlite3OsOpen( ** down into the VFS layer. Some SQLITE_OPEN_ flags (for example, ** SQLITE_OPEN_FULLMUTEX or SQLITE_OPEN_SHAREDCACHE) are blocked before ** reaching the VFS. */ + assert( zPath || (flags & SQLITE_OPEN_EXCLUSIVE) ); rc = pVfs->xOpen(pVfs, zPath, pFile, flags & 0x1087f7f, pFlagsOut); assert( rc==SQLITE_OK || pFile->pMethods==0 ); return rc; @@ -25468,7 +26662,7 @@ static void *sqlite3MemMalloc(int nByte){ ** or sqlite3MemRealloc(). ** ** For this low-level routine, we already know that pPrior!=0 since -** cases where pPrior==0 will have been intecepted and dealt with +** cases where pPrior==0 will have been intercepted and dealt with ** by higher-level routines. 
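**
** For example, sqlite3_free(0) is a harmless no-op that is screened
** out before control reaches this level, so sqlite3MemFree() may
** assume a non-NULL pPrior.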
*/ static void sqlite3MemFree(void *pPrior){ @@ -25556,7 +26750,7 @@ static int sqlite3MemInit(void *NotUsed){ return SQLITE_OK; } len = sizeof(cpuCount); - /* One usually wants to use hw.acctivecpu for MT decisions, but not here */ + /* One usually wants to use hw.activecpu for MT decisions, but not here */ sysctlbyname("hw.ncpu", &cpuCount, &len, NULL, 0); if( cpuCount>1 ){ /* defer MT decisions to system malloc */ @@ -27251,9 +28445,13 @@ static int memsys5Roundup(int n){ if( n<=mem5.szAtom ) return mem5.szAtom; return mem5.szAtom*2; } - if( n>0x40000000 ) return 0; + if( n>0x10000000 ){ + if( n>0x40000000 ) return 0; + if( n>0x20000000 ) return 0x40000000; + return 0x20000000; + } for(iFullSz=mem5.szAtom*8; iFullSz=n ) return iFullSz/2; + if( (iFullSz/2)>=(i64)n ) return iFullSz/2; return iFullSz; } @@ -27544,7 +28742,7 @@ static void checkMutexFree(sqlite3_mutex *p){ assert( SQLITE_MUTEX_FAST<2 ); assert( SQLITE_MUTEX_WARNONCONTENTION<2 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( ((CheckMutex*)p)->iType<2 ) #endif { @@ -28019,7 +29217,7 @@ SQLITE_PRIVATE sqlite3_mutex_methods const *sqlite3DefaultMutex(void){ /* ** The sqlite3_mutex.id, sqlite3_mutex.nRef, and sqlite3_mutex.owner fields -** are necessary under two condidtions: (1) Debug builds and (2) using +** are necessary under two conditions: (1) Debug builds and (2) using ** home-grown mutexes. Encapsulate these conditions into a single #define. */ #if defined(SQLITE_DEBUG) || defined(SQLITE_HOMEGROWN_RECURSIVE_MUTEX) @@ -28216,7 +29414,7 @@ static sqlite3_mutex *pthreadMutexAlloc(int iType){ */ static void pthreadMutexFree(sqlite3_mutex *p){ assert( p->nRef==0 ); -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( p->id==SQLITE_MUTEX_FAST || p->id==SQLITE_MUTEX_RECURSIVE ) #endif { @@ -28520,7 +29718,7 @@ struct sqlite3_mutex { CRITICAL_SECTION mutex; /* Mutex controlling the lock */ int id; /* Mutex type */ #ifdef SQLITE_DEBUG - volatile int nRef; /* Number of enterances */ + volatile int nRef; /* Number of entrances */ volatile DWORD owner; /* Thread holding this mutex */ volatile LONG trace; /* True to trace changes */ #endif @@ -28569,7 +29767,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){ SQLITE_MEMORY_BARRIER; #elif defined(__GNUC__) __sync_synchronize(); -#elif MSVC_VERSION>=1300 +#elif MSVC_VERSION>=1400 _ReadWriteBarrier(); #elif defined(MemoryBarrier) MemoryBarrier(); @@ -29165,18 +30363,34 @@ static void mallocWithAlarm(int n, void **pp){ *pp = p; } +/* +** Maximum size of any single memory allocation. +** +** This is not a limit on the total amount of memory used. This is +** a limit on the size parameter to sqlite3_malloc() and sqlite3_realloc(). +** +** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391 +** This provides a 256-byte safety margin for defense against 32-bit +** signed integer overflow bugs when computing memory allocation sizes. +** Paranoid applications might want to reduce the maximum allocation size +** further for an even larger safety margin. 0x3fffffff or 0x0fffffff +** or even smaller would be reasonable upper bounds on the size of a memory +** allocations for most applications. +*/ +#ifndef SQLITE_MAX_ALLOCATION_SIZE +# define SQLITE_MAX_ALLOCATION_SIZE 2147483391 +#endif +#if SQLITE_MAX_ALLOCATION_SIZE>2147483391 +# error Maximum size for SQLITE_MAX_ALLOCATION_SIZE is 2147483391 +#endif + /* ** Allocate memory. This routine is like sqlite3_malloc() except that it ** assumes the memory subsystem has already been initialized. 
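**
** For example, with the default SQLITE_MAX_ALLOCATION_SIZE of
** 2147483391, a call such as sqlite3Malloc(2147483392) (one byte over
** the cap) returns NULL instead of risking 32-bit signed overflow
** inside the underlying xMalloc().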
*/ SQLITE_PRIVATE void *sqlite3Malloc(u64 n){ void *p; - if( n==0 || n>=0x7fffff00 ){ - /* A memory allocation of a number of bytes which is near the maximum - ** signed integer value might cause an integer overflow inside of the - ** xMalloc(). Hence we limit the maximum size to 0x7fffff00, giving - ** 255 bytes of overhead. SQLite itself will never use anything near - ** this amount. The only way to reach the limit is with sqlite3_malloc() */ + if( n==0 || n>SQLITE_MAX_ALLOCATION_SIZE ){ p = 0; }else if( sqlite3GlobalConfig.bMemstat ){ sqlite3_mutex_enter(mem0.mutex); @@ -29212,7 +30426,7 @@ SQLITE_API void *sqlite3_malloc64(sqlite3_uint64 n){ */ #ifndef SQLITE_OMIT_LOOKASIDE static int isLookaside(sqlite3 *db, const void *p){ - return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pEnd); + return SQLITE_WITHIN(p, db->lookaside.pStart, db->lookaside.pTrueEnd); } #else #define isLookaside(A,B) 0 @@ -29236,18 +30450,16 @@ static int lookasideMallocSize(sqlite3 *db, const void *p){ SQLITE_PRIVATE int sqlite3DbMallocSize(sqlite3 *db, const void *p){ assert( p!=0 ); #ifdef SQLITE_DEBUG - if( db==0 || !isLookaside(db,p) ){ - if( db==0 ){ - assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); - assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); - }else{ - assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); - assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); - } + if( db==0 ){ + assert( sqlite3MemdebugNoType(p, (u8)~MEMTYPE_HEAP) ); + assert( sqlite3MemdebugHasType(p, MEMTYPE_HEAP) ); + }else if( !isLookaside(db,p) ){ + assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); } #endif if( db ){ - if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){ + if( ((uptr)p)<(uptr)(db->lookaside.pTrueEnd) ){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){ assert( sqlite3_mutex_held(db->mutex) ); @@ -29303,14 +30515,11 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){ assert( db==0 || sqlite3_mutex_held(db->mutex) ); assert( p!=0 ); if( db ){ - if( db->pnBytesFreed ){ - measureAllocationSize(db, p); - return; - } if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){ #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE if( ((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){ LookasideSlot *pBuf = (LookasideSlot*)p; + assert( db->pnBytesFreed==0 ); #ifdef SQLITE_DEBUG memset(p, 0xaa, LOOKASIDE_SMALL); /* Trash freed content */ #endif @@ -29321,6 +30530,7 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){ #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){ LookasideSlot *pBuf = (LookasideSlot*)p; + assert( db->pnBytesFreed==0 ); #ifdef SQLITE_DEBUG memset(p, 0xaa, db->lookaside.szTrue); /* Trash freed content */ #endif @@ -29329,6 +30539,10 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){ return; } } + if( db->pnBytesFreed ){ + measureAllocationSize(db, p); + return; + } } assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); @@ -29336,6 +30550,43 @@ SQLITE_PRIVATE void sqlite3DbFreeNN(sqlite3 *db, void *p){ sqlite3MemdebugSetType(p, MEMTYPE_HEAP); sqlite3_free(p); } +SQLITE_PRIVATE void sqlite3DbNNFreeNN(sqlite3 *db, void *p){ + assert( db!=0 ); + assert( sqlite3_mutex_held(db->mutex) ); + assert( p!=0 ); + if( ((uptr)p)<(uptr)(db->lookaside.pEnd) ){ +#ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE + if( 
((uptr)p)>=(uptr)(db->lookaside.pMiddle) ){ + LookasideSlot *pBuf = (LookasideSlot*)p; + assert( db->pnBytesFreed==0 ); +#ifdef SQLITE_DEBUG + memset(p, 0xaa, LOOKASIDE_SMALL); /* Trash freed content */ +#endif + pBuf->pNext = db->lookaside.pSmallFree; + db->lookaside.pSmallFree = pBuf; + return; + } +#endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ + if( ((uptr)p)>=(uptr)(db->lookaside.pStart) ){ + LookasideSlot *pBuf = (LookasideSlot*)p; + assert( db->pnBytesFreed==0 ); +#ifdef SQLITE_DEBUG + memset(p, 0xaa, db->lookaside.szTrue); /* Trash freed content */ +#endif + pBuf->pNext = db->lookaside.pFree; + db->lookaside.pFree = pBuf; + return; + } + } + if( db->pnBytesFreed ){ + measureAllocationSize(db, p); + return; + } + assert( sqlite3MemdebugHasType(p, (MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + assert( sqlite3MemdebugNoType(p, (u8)~(MEMTYPE_LOOKASIDE|MEMTYPE_HEAP)) ); + sqlite3MemdebugSetType(p, MEMTYPE_HEAP); + sqlite3_free(p); +} SQLITE_PRIVATE void sqlite3DbFree(sqlite3 *db, void *p){ assert( db==0 || sqlite3_mutex_held(db->mutex) ); if( p ) sqlite3DbFreeNN(db, p); @@ -29635,9 +30886,14 @@ SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){ */ SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){ int n; +#ifdef SQLITE_DEBUG + /* Because of the way the parser works, the span is guaranteed to contain + ** at least one non-space character */ + for(n=0; sqlite3Isspace(zStart[n]); n++){ assert( &zStart[n]0) && sqlite3Isspace(zStart[n-1]) ) n--; + while( sqlite3Isspace(zStart[n-1]) ) n--; return sqlite3DbStrNDup(db, zStart, n); } @@ -29733,7 +30989,7 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3* db, int rc){ if( db->mallocFailed || rc ){ return apiHandleError(db, rc); } - return rc & db->errMask; + return 0; } /************** End of malloc.c **********************************************/ @@ -29845,43 +31101,6 @@ static const et_info fmtinfo[] = { ** %!S Like %S but prefer the zName over the zAlias */ -/* Floating point constants used for rounding */ -static const double arRound[] = { - 5.0e-01, 5.0e-02, 5.0e-03, 5.0e-04, 5.0e-05, - 5.0e-06, 5.0e-07, 5.0e-08, 5.0e-09, 5.0e-10, -}; - -/* -** If SQLITE_OMIT_FLOATING_POINT is defined, then none of the floating point -** conversions will work. -*/ -#ifndef SQLITE_OMIT_FLOATING_POINT -/* -** "*val" is a double such that 0.1 <= *val < 10.0 -** Return the ascii code for the leading digit of *val, then -** multiply "*val" by 10.0 to renormalize. -** -** Example: -** input: *val = 3.14159 -** output: *val = 1.4159 function return = '3' -** -** The counter *cnt is incremented each time. After counter exceeds -** 16 (the number of significant digits in a 64-bit float) '0' is -** always returned. -*/ -static char et_getdigit(LONGDOUBLE_TYPE *val, int *cnt){ - int digit; - LONGDOUBLE_TYPE d; - if( (*cnt)<=0 ) return '0'; - (*cnt)--; - digit = (int)*val; - d = digit; - digit += '0'; - *val = (*val - d)*10.0; - return (char)digit; -} -#endif /* SQLITE_OMIT_FLOATING_POINT */ - /* ** Set the StrAccum object to an error mode. */ @@ -29973,18 +31192,15 @@ SQLITE_API void sqlite3_str_vappendf( u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */ char prefix; /* Prefix character. "+" or "-" or " " or '\0'. 
*/ sqlite_uint64 longvalue; /* Value for integer types */ - LONGDOUBLE_TYPE realvalue; /* Value for real types */ + double realvalue; /* Value for real types */ const et_info *infop; /* Pointer to the appropriate info structure */ char *zOut; /* Rendering buffer */ int nOut; /* Size of the rendering buffer */ char *zExtra = 0; /* Malloced memory used by some conversion */ -#ifndef SQLITE_OMIT_FLOATING_POINT - int exp, e2; /* exponent of real numbers */ - int nsd; /* Number of significant digits returned */ - double rounder; /* Used for rounding floating point values */ + int exp, e2; /* exponent of real numbers */ etByte flag_dp; /* True if decimal point should be shown */ etByte flag_rtz; /* True if trailing zeros should be removed */ -#endif + PrintfArguments *pArgList = 0; /* Arguments for SQLITE_PRINTF_SQLFUNC */ char buf[etBUFSIZE]; /* Conversion buffer */ @@ -30259,73 +31475,67 @@ SQLITE_API void sqlite3_str_vappendf( break; case etFLOAT: case etEXP: - case etGENERIC: + case etGENERIC: { + FpDecode s; + int iRound; + int j; + if( bArgList ){ realvalue = getDoubleArg(pArgList); }else{ realvalue = va_arg(ap,double); } -#ifdef SQLITE_OMIT_FLOATING_POINT - length = 0; -#else if( precision<0 ) precision = 6; /* Set default precision */ #ifdef SQLITE_FP_PRECISION_LIMIT if( precision>SQLITE_FP_PRECISION_LIMIT ){ precision = SQLITE_FP_PRECISION_LIMIT; } #endif - if( realvalue<0.0 ){ - realvalue = -realvalue; - prefix = '-'; - }else{ - prefix = flag_prefix; - } - if( xtype==etGENERIC && precision>0 ) precision--; - testcase( precision>0xfff ); - idx = precision & 0xfff; - rounder = arRound[idx%10]; - while( idx>=10 ){ rounder *= 1.0e-10; idx -= 10; } if( xtype==etFLOAT ){ - double rx = (double)realvalue; - sqlite3_uint64 u; - int ex; - memcpy(&u, &rx, sizeof(u)); - ex = -1023 + (int)((u>>52)&0x7ff); - if( precision+(ex/3) < 15 ) rounder += realvalue*3e-16; - realvalue += rounder; - } - /* Normalize realvalue to within 10.0 > realvalue >= 1.0 */ - exp = 0; - if( sqlite3IsNaN((double)realvalue) ){ - bufpt = "NaN"; - length = 3; - break; + iRound = -precision; + }else if( xtype==etGENERIC ){ + if( precision==0 ) precision = 1; + iRound = precision; + }else{ + iRound = precision+1; } - if( realvalue>0.0 ){ - LONGDOUBLE_TYPE scale = 1.0; - while( realvalue>=1e100*scale && exp<=350 ){ scale *= 1e100;exp+=100;} - while( realvalue>=1e10*scale && exp<=350 ){ scale *= 1e10; exp+=10; } - while( realvalue>=10.0*scale && exp<=350 ){ scale *= 10.0; exp++; } - realvalue /= scale; - while( realvalue<1e-8 ){ realvalue *= 1e8; exp-=8; } - while( realvalue<1.0 ){ realvalue *= 10.0; exp--; } - if( exp>350 ){ + sqlite3FpDecode(&s, realvalue, iRound, flag_altform2 ? 26 : 16); + if( s.isSpecial ){ + if( s.isSpecial==2 ){ + bufpt = flag_zeropad ? "null" : "NaN"; + length = sqlite3Strlen30(bufpt); + break; + }else if( flag_zeropad ){ + s.z[0] = '9'; + s.iDP = 1000; + s.n = 1; + }else{ + memcpy(buf, "-Inf", 5); bufpt = buf; - buf[0] = prefix; - memcpy(buf+(prefix!=0),"Inf",4); - length = 3+(prefix!=0); + if( s.sign=='-' ){ + /* no-op */ + }else if( flag_prefix ){ + buf[0] = flag_prefix; + }else{ + bufpt++; + } + length = sqlite3Strlen30(bufpt); break; } } - bufpt = buf; + if( s.sign=='-' ){ + prefix = '-'; + }else{ + prefix = flag_prefix; + } + + exp = s.iDP-1; + if( xtype==etGENERIC && precision>0 ) precision--; + /* ** If the field type is etGENERIC, then convert to either etEXP ** or etFLOAT, as appropriate. 
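**
** For example (ordinary printf "%g" semantics, given here only as an
** illustration): with the default precision of 6, the value 1234567.0
** has exp==6, which exceeds the decremented precision of 5, so it is
** rendered in etEXP form as "1.23457e+06"; the value 0.0001 has
** exp==-4 and stays in etFLOAT form as "0.0001".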
*/ - if( xtype!=etFLOAT ){ - realvalue += rounder; - if( realvalue>=10.0 ){ realvalue *= 0.1; exp++; } - } if( xtype==etGENERIC ){ flag_rtz = !flag_alternateform; if( exp<-4 || exp>precision ){ @@ -30340,29 +31550,32 @@ SQLITE_API void sqlite3_str_vappendf( if( xtype==etEXP ){ e2 = 0; }else{ - e2 = exp; + e2 = s.iDP - 1; } + bufpt = buf; { i64 szBufNeeded; /* Size of a temporary buffer needed */ szBufNeeded = MAX(e2,0)+(i64)precision+(i64)width+15; + if( cThousand && e2>0 ) szBufNeeded += (e2+2)/3; if( szBufNeeded > etBUFSIZE ){ bufpt = zExtra = printfTempBuf(pAccum, szBufNeeded); if( bufpt==0 ) return; } } zOut = bufpt; - nsd = 16 + flag_altform2*10; flag_dp = (precision>0 ?1:0) | flag_alternateform | flag_altform2; /* The sign in front of the number */ if( prefix ){ *(bufpt++) = prefix; } /* Digits prior to the decimal point */ + j = 0; if( e2<0 ){ *(bufpt++) = '0'; }else{ for(; e2>=0; e2--){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = j1 ) *(bufpt++) = ','; } } /* The decimal point */ @@ -30371,13 +31584,12 @@ SQLITE_API void sqlite3_str_vappendf( } /* "0" digits after the decimal point but before the first ** significant digit of the number */ - for(e2++; e2<0; precision--, e2++){ - assert( precision>0 ); + for(e2++; e2<0 && precision>0; precision--, e2++){ *(bufpt++) = '0'; } /* Significant digits after the decimal point */ while( (precision--)>0 ){ - *(bufpt++) = et_getdigit(&realvalue,&nsd); + *(bufpt++) = jcharset]; if( exp<0 ){ *(bufpt++) = '-'; exp = -exp; @@ -30426,8 +31639,8 @@ SQLITE_API void sqlite3_str_vappendf( while( nPad-- ) bufpt[i++] = '0'; length = width; } -#endif /* !defined(SQLITE_OMIT_FLOATING_POINT) */ break; + } case etSIZE: if( !bArgList ){ *(va_arg(ap,int*)) = pAccum->nChar; @@ -30476,13 +31689,26 @@ SQLITE_API void sqlite3_str_vappendf( } } if( precision>1 ){ + i64 nPrior = 1; width -= precision-1; if( width>1 && !flag_leftjustify ){ sqlite3_str_appendchar(pAccum, width-1, ' '); width = 0; } - while( precision-- > 1 ){ - sqlite3_str_append(pAccum, buf, length); + sqlite3_str_append(pAccum, buf, length); + precision--; + while( precision > 1 ){ + i64 nCopyBytes; + if( nPrior > precision-1 ) nPrior = precision - 1; + nCopyBytes = length*nPrior; + if( nCopyBytes + pAccum->nChar >= pAccum->nAlloc ){ + sqlite3StrAccumEnlarge(pAccum, nCopyBytes); + } + if( pAccum->accError ) break; + sqlite3_str_append(pAccum, + &pAccum->zText[pAccum->nChar-nCopyBytes], nCopyBytes); + precision -= nPrior; + nPrior *= 2; } } bufpt = buf; @@ -30710,9 +31936,9 @@ SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExp ** Return the number of bytes of text that StrAccum is able to accept ** after the attempted enlargement. The value returned might be zero. */ -SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ +SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, i64 N){ char *zNew; - assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */ + assert( p->nChar+N >= p->nAlloc ); /* Only called if really needed */ if( p->accError ){ testcase(p->accError==SQLITE_TOOBIG); testcase(p->accError==SQLITE_NOMEM); @@ -30723,8 +31949,7 @@ SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ return p->nAlloc - p->nChar - 1; }else{ char *zOld = isMalloced(p) ? 
p->zText : 0; - i64 szNew = p->nChar; - szNew += (sqlite3_int64)N + 1; + i64 szNew = p->nChar + N + 1; if( szNew+p->nChar<=p->mxAlloc ){ /* Force exponential buffer size growth as long as it does not overflow, ** to avoid having to call this routine too often */ @@ -30754,7 +31979,8 @@ SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){ return 0; } } - return N; + assert( N>=0 && N<=0x7fffffff ); + return (int)N; } /* @@ -31045,12 +32271,22 @@ SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_li return zBuf; } SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){ - char *z; + StrAccum acc; va_list ap; + if( n<=0 ) return zBuf; +#ifdef SQLITE_ENABLE_API_ARMOR + if( zBuf==0 || zFormat==0 ) { + (void)SQLITE_MISUSE_BKPT; + if( zBuf ) zBuf[0] = 0; + return zBuf; + } +#endif + sqlite3StrAccumInit(&acc, 0, zBuf, n, 0); va_start(ap,zFormat); - z = sqlite3_vsnprintf(n, zBuf, zFormat, ap); + sqlite3_str_vappendf(&acc, zFormat, ap); va_end(ap); - return z; + zBuf[acc.nChar] = 0; + return zBuf; } /* @@ -31128,6 +32364,75 @@ SQLITE_API void sqlite3_str_appendf(StrAccum *p, const char *zFormat, ...){ va_end(ap); } + +/***************************************************************************** +** Reference counted string/blob storage +*****************************************************************************/ + +/* +** Increase the reference count of the string by one. +** +** The input parameter is returned. +*/ +SQLITE_PRIVATE char *sqlite3RCStrRef(char *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + p->nRCRef++; + return z; +} + +/* +** Decrease the reference count by one. Free the string when the +** reference count reaches zero. +*/ +SQLITE_PRIVATE void sqlite3RCStrUnref(void *z){ + RCStr *p = (RCStr*)z; + assert( p!=0 ); + p--; + assert( p->nRCRef>0 ); + if( p->nRCRef>=2 ){ + p->nRCRef--; + }else{ + sqlite3_free(p); + } +} + +/* +** Create a new string that is capable of holding N bytes of text, not counting +** the zero byte at the end. The string is uninitialized. +** +** The reference count is initially 1. Call sqlite3RCStrUnref() to free the +** newly allocated string. +** +** This routine returns 0 on an OOM. +*/ +SQLITE_PRIVATE char *sqlite3RCStrNew(u64 N){ + RCStr *p = sqlite3_malloc64( N + sizeof(*p) + 1 ); + if( p==0 ) return 0; + p->nRCRef = 1; + return (char*)&p[1]; +} + +/* +** Change the size of the string so that it is able to hold N bytes. +** The string might be reallocated, so return the new allocation. 
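+**
+** Typical lifecycle (an illustrative sketch, not code taken from this
+** file):
+**
+**    char *z = sqlite3RCStrNew(10);    /* reference count == 1 */
+**    z = sqlite3RCStrResize(z, 20);    /* legal only while count == 1 */
+**    sqlite3RCStrRef(z);               /* reference count == 2 */
+**    sqlite3RCStrUnref(z);             /* reference count == 1 */
+**    sqlite3RCStrUnref(z);             /* count reaches 0: z is freed */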
+*/ +SQLITE_PRIVATE char *sqlite3RCStrResize(char *z, u64 N){ + RCStr *p = (RCStr*)z; + RCStr *pNew; + assert( p!=0 ); + p--; + assert( p->nRCRef==1 ); + pNew = sqlite3_realloc64(p, N+sizeof(RCStr)+1); + if( pNew==0 ){ + sqlite3_free(p); + return 0; + }else{ + return (char*)&pNew[1]; + } +} + /************** End of printf.c **********************************************/ /************** Begin file treeview.c ****************************************/ /* @@ -31350,6 +32655,13 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc) if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){ sqlite3_str_appendf(&x, " ON"); } + if( pItem->fg.isTabFunc ) sqlite3_str_appendf(&x, " isTabFunc"); + if( pItem->fg.isCorrelated ) sqlite3_str_appendf(&x, " isCorrelated"); + if( pItem->fg.isMaterialized ) sqlite3_str_appendf(&x, " isMaterialized"); + if( pItem->fg.viaCoroutine ) sqlite3_str_appendf(&x, " viaCoroutine"); + if( pItem->fg.notCte ) sqlite3_str_appendf(&x, " notCte"); + if( pItem->fg.isNestedFrom ) sqlite3_str_appendf(&x, " isNestedFrom"); + sqlite3StrAccumFinish(&x); sqlite3TreeViewItem(pView, zLine, inSrc-1); n = 0; @@ -31537,6 +32849,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u sqlite3TreeViewItem(pView, "FILTER", 1); sqlite3TreeViewExpr(pView, pWin->pFilter, 0); sqlite3TreeViewPop(&pView); + if( pWin->eFrmType==TK_FILTER ) return; } sqlite3TreeViewPush(&pView, more); if( pWin->zName ){ @@ -31546,7 +32859,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u } if( pWin->zBase ) nElement++; if( pWin->pOrderBy ) nElement++; - if( pWin->eFrmType ) nElement++; + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ) nElement++; if( pWin->eExclude ) nElement++; if( pWin->zBase ){ sqlite3TreeViewPush(&pView, (--nElement)>0); @@ -31559,7 +32872,7 @@ SQLITE_PRIVATE void sqlite3TreeViewWindow(TreeView *pView, const Window *pWin, u if( pWin->pOrderBy ){ sqlite3TreeViewExprList(pView, pWin->pOrderBy, (--nElement)>0, "ORDER-BY"); } - if( pWin->eFrmType ){ + if( pWin->eFrmType!=0 && pWin->eFrmType!=TK_FILTER ){ char zBuf[30]; const char *zFrmType = "ROWS"; if( pWin->eFrmType==TK_RANGE ) zFrmType = "RANGE"; @@ -31619,7 +32932,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewPop(&pView); return; } - if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){ + if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags || pExpr->pAggInfo ){ StrAccum x; sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0); sqlite3_str_appendf(&x, " fg.af=%x.%c", @@ -31636,6 +32949,9 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m if( ExprHasVVAProperty(pExpr, EP_Immutable) ){ sqlite3_str_appendf(&x, " IMMUTABLE"); } + if( pExpr->pAggInfo!=0 ){ + sqlite3_str_appendf(&x, " agg-column[%d]", pExpr->iAgg); + } sqlite3StrAccumFinish(&x); }else{ zFlgs[0] = 0; @@ -31765,7 +33081,8 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m }; assert( pExpr->op2==TK_IS || pExpr->op2==TK_ISNOT ); assert( pExpr->pRight ); - assert( sqlite3ExprSkipCollate(pExpr->pRight)->op==TK_TRUEFALSE ); + assert( sqlite3ExprSkipCollateAndLikely(pExpr->pRight)->op + == TK_TRUEFALSE ); x = (pExpr->op2==TK_ISNOT)*2 + sqlite3ExprTruthValue(pExpr->pRight); zUniOp = azOp[x]; break; @@ -31803,7 +33120,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m assert( ExprUseXList(pExpr) ); pFarg = pExpr->x.pList; #ifndef 
SQLITE_OMIT_WINDOWFUNC - pWin = ExprHasProperty(pExpr, EP_WinFunc) ? pExpr->y.pWin : 0; + pWin = IsWindowFunc(pExpr) ? pExpr->y.pWin : 0; #else pWin = 0; #endif @@ -31829,7 +33146,13 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m sqlite3TreeViewLine(pView, "FUNCTION %Q%s", pExpr->u.zToken, zFlgs); } if( pFarg ){ - sqlite3TreeViewExprList(pView, pFarg, pWin!=0, 0); + sqlite3TreeViewExprList(pView, pFarg, pWin!=0 || pExpr->pLeft, 0); + if( pExpr->pLeft ){ + Expr *pOB = pExpr->pLeft; + assert( pOB->op==TK_ORDER ); + assert( ExprUseXList(pOB) ); + sqlite3TreeViewExprList(pView, pOB->x.pList, pWin!=0, "ORDERBY"); + } } #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ @@ -31838,6 +33161,10 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m #endif break; } + case TK_ORDER: { + sqlite3TreeViewExprList(pView, pExpr->x.pList, 0, "ORDERBY"); + break; + } #ifndef SQLITE_OMIT_SUBQUERY case TK_EXISTS: { assert( ExprUseXSelect(pExpr) ); @@ -31891,7 +33218,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m assert( pExpr->x.pList->nExpr==2 ); pY = pExpr->x.pList->a[0].pExpr; pZ = pExpr->x.pList->a[1].pExpr; - sqlite3TreeViewLine(pView, "BETWEEN"); + sqlite3TreeViewLine(pView, "BETWEEN%s", zFlgs); sqlite3TreeViewExpr(pView, pX, 1); sqlite3TreeViewExpr(pView, pY, 1); sqlite3TreeViewExpr(pView, pZ, 0); @@ -32447,16 +33774,41 @@ SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window *p){ sqlite3TreeViewWinFunc( ** This structure is the current state of the generator. */ static SQLITE_WSD struct sqlite3PrngType { - unsigned char isInit; /* True if initialized */ - unsigned char i, j; /* State variables */ - unsigned char s[256]; /* State variables */ + u32 s[16]; /* 64 bytes of chacha20 state */ + u8 out[64]; /* Output bytes */ + u8 n; /* Output bytes remaining */ } sqlite3Prng; + +/* The RFC-7539 ChaCha20 block function +*/ +#define ROTL(a,b) (((a) << (b)) | ((a) >> (32 - (b)))) +#define QR(a, b, c, d) ( \ + a += b, d ^= a, d = ROTL(d,16), \ + c += d, b ^= c, b = ROTL(b,12), \ + a += b, d ^= a, d = ROTL(d, 8), \ + c += d, b ^= c, b = ROTL(b, 7)) +static void chacha_block(u32 *out, const u32 *in){ + int i; + u32 x[16]; + memcpy(x, in, 64); + for(i=0; i<10; i++){ + QR(x[0], x[4], x[ 8], x[12]); + QR(x[1], x[5], x[ 9], x[13]); + QR(x[2], x[6], x[10], x[14]); + QR(x[3], x[7], x[11], x[15]); + QR(x[0], x[5], x[10], x[15]); + QR(x[1], x[6], x[11], x[12]); + QR(x[2], x[7], x[ 8], x[13]); + QR(x[3], x[4], x[ 9], x[14]); + } + for(i=0; i<16; i++) out[i] = x[i]+in[i]; +} + /* ** Return N random bytes. */ SQLITE_API void sqlite3_randomness(int N, void *pBuf){ - unsigned char t; unsigned char *zBuf = pBuf; /* The "wsdPrng" macro will resolve to the pseudo-random number generator @@ -32486,53 +33838,46 @@ SQLITE_API void sqlite3_randomness(int N, void *pBuf){ sqlite3_mutex_enter(mutex); if( N<=0 || pBuf==0 ){ - wsdPrng.isInit = 0; + wsdPrng.s[0] = 0; sqlite3_mutex_leave(mutex); return; } /* Initialize the state of the random number generator once, - ** the first time this routine is called. The seed value does - ** not need to contain a lot of randomness since we are not - ** trying to do secure encryption or anything like that... - ** - ** Nothing in this file or anywhere else in SQLite does any kind of - ** encryption. The RC4 algorithm is being used as a PRNG (pseudo-random - ** number generator) not as an encryption device. + ** the first time this routine is called. 
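+  ** The first four words of s[] receive the standard ChaCha20
+  ** constants ("expand 32-byte k"). The next 44 bytes, s[4]..s[14],
+  ** are filled with randomness from the VFS; the word originally in
+  ** s[12] is then moved to s[15] and s[12] is zeroed so that it can
+  ** serve as the 32-bit block counter.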
*/ - if( !wsdPrng.isInit ){ + if( wsdPrng.s[0]==0 ){ sqlite3_vfs *pVfs = sqlite3_vfs_find(0); - int i; - char k[256]; - wsdPrng.j = 0; - wsdPrng.i = 0; + static const u32 chacha20_init[] = { + 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 + }; + memcpy(&wsdPrng.s[0], chacha20_init, 16); if( NEVER(pVfs==0) ){ - memset(k, 0, sizeof(k)); + memset(&wsdPrng.s[4], 0, 44); }else{ - sqlite3OsRandomness(pVfs, 256, k); - } - for(i=0; i<256; i++){ - wsdPrng.s[i] = (u8)i; - } - for(i=0; i<256; i++){ - wsdPrng.j += wsdPrng.s[i] + k[i]; - t = wsdPrng.s[wsdPrng.j]; - wsdPrng.s[wsdPrng.j] = wsdPrng.s[i]; - wsdPrng.s[i] = t; + sqlite3OsRandomness(pVfs, 44, (char*)&wsdPrng.s[4]); } - wsdPrng.isInit = 1; + wsdPrng.s[15] = wsdPrng.s[12]; + wsdPrng.s[12] = 0; + wsdPrng.n = 0; } assert( N>0 ); - do{ - wsdPrng.i++; - t = wsdPrng.s[wsdPrng.i]; - wsdPrng.j += t; - wsdPrng.s[wsdPrng.i] = wsdPrng.s[wsdPrng.j]; - wsdPrng.s[wsdPrng.j] = t; - t += wsdPrng.s[wsdPrng.i]; - *(zBuf++) = wsdPrng.s[t]; - }while( --N ); + while( 1 /* exit by break */ ){ + if( N<=wsdPrng.n ){ + memcpy(zBuf, &wsdPrng.out[wsdPrng.n-N], N); + wsdPrng.n -= N; + break; + } + if( wsdPrng.n>0 ){ + memcpy(zBuf, wsdPrng.out, wsdPrng.n); + N -= wsdPrng.n; + zBuf += wsdPrng.n; + } + wsdPrng.s[12]++; + chacha_block((u32*)wsdPrng.out, wsdPrng.s); + wsdPrng.n = 64; + } sqlite3_mutex_leave(mutex); } @@ -33008,7 +34353,38 @@ SQLITE_PRIVATE u32 sqlite3Utf8Read( return c; } - +/* +** Read a single UTF8 character out of buffer z[], but reading no +** more than n characters from the buffer. z[] is not zero-terminated. +** +** Return the number of bytes used to construct the character. +** +** Invalid UTF8 might generate a strange result. No effort is made +** to detect invalid UTF8. +** +** At most 4 bytes will be read out of z[]. The return value will always +** be between 1 and 4. +*/ +SQLITE_PRIVATE int sqlite3Utf8ReadLimited( + const u8 *z, + int n, + u32 *piOut +){ + u32 c; + int i = 1; + assert( n>0 ); + c = z[0]; + if( c>=0xc0 ){ + c = sqlite3Utf8Trans1[c-0xc0]; + if( n>4 ) n = 4; + while( inDb; ii++){ + if( db->aDb[ii].pBt ){ + iErr = sqlite3PagerWalSystemErrno(sqlite3BtreePager(db->aDb[ii].pBt)); + if( iErr ){ + db->iSysErrno = iErr; + } + } + } + sqlite3BtreeLeaveAll(db); + return; + } +#endif rc &= 0xff; if( rc==SQLITE_CANTOPEN || rc==SQLITE_IOERR ){ db->iSysErrno = sqlite3OsGetLastError(db->pVfs); @@ -33557,6 +34963,30 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *z } } +/* +** Check for interrupts and invoke progress callback. +*/ +SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){ + sqlite3 *db = p->db; + if( AtomicLoad(&db->u1.isInterrupted) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + if( db->xProgress ){ + if( p->rc==SQLITE_INTERRUPT ){ + p->nProgressSteps = 0; + }else if( (++p->nProgressSteps)>=db->nProgressOps ){ + if( db->xProgress(db->pProgressArg) ){ + p->nErr++; + p->rc = SQLITE_INTERRUPT; + } + p->nProgressSteps = 0; + } + } +#endif +} + /* ** Add an error message to pParse->zErrMsg and increment pParse->nErr. ** @@ -33748,43 +35178,40 @@ SQLITE_PRIVATE u8 sqlite3StrIHash(const char *z){ return h; } -/* -** Compute 10 to the E-th power. Examples: E==1 results in 10. -** E==2 results in 100. E==50 results in 1.0e50. +/* Double-Double multiplication. (x[0],x[1]) *= (y,yy) ** -** This routine only works for values of E between 1 and 341. +** Reference: +** T. J. Dekker, "A Floating-Point Technique for Extending the +** Available Precision". 1971-07-26. 
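+**
+** Illustrative example (a sketch): if x[] holds the double-double
+** value (1.0e16, 0.25) and it is multiplied by (10.0, 0.0), the exact
+** product 1.0e17 + 2.5 does not fit in one binary64, so the result
+** comes back split across the pair as approximately (1.0e17, 2.5).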
*/ -static LONGDOUBLE_TYPE sqlite3Pow10(int E){ -#if defined(_MSC_VER) - static const LONGDOUBLE_TYPE x[] = { - 1.0e+001L, - 1.0e+002L, - 1.0e+004L, - 1.0e+008L, - 1.0e+016L, - 1.0e+032L, - 1.0e+064L, - 1.0e+128L, - 1.0e+256L - }; - LONGDOUBLE_TYPE r = 1.0; - int i; - assert( E>=0 && E<=307 ); - for(i=0; E!=0; i++, E >>=1){ - if( E & 1 ) r *= x[i]; - } - return r; -#else - LONGDOUBLE_TYPE x = 10.0; - LONGDOUBLE_TYPE r = 1.0; - while(1){ - if( E & 1 ) r *= x; - E >>= 1; - if( E==0 ) break; - x *= x; - } - return r; -#endif +static void dekkerMul2(volatile double *x, double y, double yy){ + /* + ** The "volatile" keywords on parameter x[] and on local variables + ** below are needed force intermediate results to be truncated to + ** binary64 rather than be carried around in an extended-precision + ** format. The truncation is necessary for the Dekker algorithm to + ** work. Intel x86 floating point might omit the truncation without + ** the use of volatile. + */ + volatile double tx, ty, p, q, c, cc; + double hx, hy; + u64 m; + memcpy(&m, (void*)&x[0], 8); + m &= 0xfffffffffc000000LL; + memcpy(&hx, &m, 8); + tx = x[0] - hx; + memcpy(&m, &y, 8); + m &= 0xfffffffffc000000LL; + memcpy(&hy, &m, 8); + ty = y - hy; + p = hx*hy; + q = hx*ty + tx*hy; + c = p+q; + cc = p - c + q + tx*ty; + cc = x[0]*yy + x[1]*y + cc; + x[0] = c + cc; + x[1] = c - x[0]; + x[1] += cc; } /* @@ -33825,12 +35252,11 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en const char *zEnd; /* sign * significand * (10 ^ (esign * exponent)) */ int sign = 1; /* sign of significand */ - i64 s = 0; /* significand */ + u64 s = 0; /* significand */ int d = 0; /* adjust exponent for shifting decimal point */ int esign = 1; /* sign of exponent */ int e = 0; /* exponent */ int eValid = 1; /* True exponent is either not used or is well-formed */ - double result; int nDigit = 0; /* Number of digits processed */ int eType = 1; /* 1: pure integer, 2+: fractional -1 or less: bad UTF16 */ @@ -33870,7 +35296,7 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en while( z=((LARGEST_INT64-9)/10) ){ + if( s>=((LARGEST_UINT64-9)/10) ){ /* skip non-significant significand digits ** (increase exponent by d to shift decimal left) */ while( z0 ){ /*OPTIMIZATION-IF-TRUE*/ - if( esign>0 ){ - if( s>=(LARGEST_INT64/10) ) break; /*OPTIMIZATION-IF-FALSE*/ - s *= 10; - }else{ - if( s%10!=0 ) break; /*OPTIMIZATION-IF-FALSE*/ - s /= 10; - } - e--; - } + /* adjust exponent by d, and update sign */ + e = (e*esign) + d; - /* adjust the sign of significand */ - s = sign<0 ? 
-s : s; + /* Try to adjust the exponent to make it smaller */ + while( e>0 && s<(LARGEST_UINT64/10) ){ + s *= 10; + e--; + } + while( e<0 && (s%10)==0 ){ + s /= 10; + e++; + } - if( e==0 ){ /*OPTIMIZATION-IF-TRUE*/ - result = (double)s; + if( e==0 ){ + *pResult = s; + }else if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE r = (LONGDOUBLE_TYPE)s; + if( e>0 ){ + while( e>=100 ){ e-=100; r *= 1.0e+100L; } + while( e>=10 ){ e-=10; r *= 1.0e+10L; } + while( e>=1 ){ e-=1; r *= 1.0e+01L; } }else{ - /* attempt to handle extremely small/large numbers better */ - if( e>307 ){ /*OPTIMIZATION-IF-TRUE*/ - if( e<342 ){ /*OPTIMIZATION-IF-TRUE*/ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e-308); - if( esign<0 ){ - result = s / scale; - result /= 1.0e+308; - }else{ - result = s * scale; - result *= 1.0e+308; - } - }else{ assert( e>=342 ); - if( esign<0 ){ - result = 0.0*s; - }else{ + while( e<=-100 ){ e+=100; r *= 1.0e-100L; } + while( e<=-10 ){ e+=10; r *= 1.0e-10L; } + while( e<=-1 ){ e+=1; r *= 1.0e-01L; } + } + assert( r>=0.0 ); + if( r>+1.7976931348623157081452742373e+308L ){ #ifdef INFINITY - result = INFINITY*s; + *pResult = +INFINITY; #else - result = 1e308*1e308*s; /* Infinity */ + *pResult = 1.0e308*10.0; #endif - } - } - }else{ - LONGDOUBLE_TYPE scale = sqlite3Pow10(e); - if( esign<0 ){ - result = s / scale; - }else{ - result = s * scale; - } + }else{ + *pResult = (double)r; + } + }else{ + double rr[2]; + u64 s2; + rr[0] = (double)s; + s2 = (u64)rr[0]; +#if defined(_MSC_VER) && _MSC_VER<1700 + if( s2==0x8000000000000000LL ){ s2 = 2*(u64)(0.5*rr[0]); } +#endif + rr[1] = s>=s2 ? (double)(s - s2) : -(double)(s2 - s); + if( e>0 ){ + while( e>=100 ){ + e -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( e>=10 ){ + e -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( e>=1 ){ + e -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + }else{ + while( e<=-100 ){ + e += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( e<=-10 ){ + e += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( e<=-1 ){ + e += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); } } + *pResult = rr[0]+rr[1]; + if( sqlite3IsNaN(*pResult) ) *pResult = 1e300*1e300; } + if( sign<0 ) *pResult = -*pResult; + assert( !sqlite3IsNaN(*pResult) ); - /* store the result */ - *pResult = result; - - /* return true if number and no extra non-whitespace chracters after */ +atof_return: + /* return true if number and no extra non-whitespace characters after */ if( z==zEnd && nDigit>0 && eValid && eType>0 ){ return eType; }else if( eType>=2 && (eType==3 || eValid) && nDigit>0 ){ @@ -34014,11 +35453,14 @@ SQLITE_PRIVATE int sqlite3AtoF(const char *z, double *pResult, int length, u8 en #endif /* -** Render an signed 64-bit integer as text. Store the result in zOut[]. +** Render an signed 64-bit integer as text. Store the result in zOut[] and +** return the length of the string that was stored, in bytes. The value +** returned does not include the zero terminator at the end of the output +** string. ** ** The caller must ensure that zOut[] is at least 21 bytes in size. 
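**
** The worst case is the most negative value: sqlite3Int64ToText()
** applied to -9223372036854775808 stores the 20 characters
** "-9223372036854775808" plus a terminator, which is why zOut[]
** must provide 21 bytes.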
*/ -SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){ +SQLITE_PRIVATE int sqlite3Int64ToText(i64 v, char *zOut){ int i; u64 x; char zTemp[22]; @@ -34029,12 +35471,15 @@ SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){ } i = sizeof(zTemp)-2; zTemp[sizeof(zTemp)-1] = 0; - do{ - zTemp[i--] = (x%10) + '0'; + while( 1 /*exit-by-break*/ ){ + zTemp[i] = (x%10) + '0'; x = x/10; - }while( x ); - if( v<0 ) zTemp[i--] = '-'; - memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i); + if( x==0 ) break; + i--; + }; + if( v<0 ) zTemp[--i] = '-'; + memcpy(zOut, &zTemp[i], sizeof(zTemp)-i); + return sizeof(zTemp)-1-i; } /* @@ -34127,7 +35572,7 @@ SQLITE_PRIVATE int sqlite3Atoi64(const char *zNum, i64 *pNum, int length, u8 enc /* This test and assignment is needed only to suppress UB warnings ** from clang and -fsanitize=undefined. This test and assignment make ** the code a little larger and slower, and no harm comes from omitting - ** them, but we must appaise the undefined-behavior pharisees. */ + ** them, but we must appease the undefined-behavior pharisees. */ *pNum = neg ? SMALLEST_INT64 : LARGEST_INT64; }else if( neg ){ *pNum = -(i64)u; @@ -34199,11 +35644,15 @@ SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char *z, i64 *pOut){ u = u*16 + sqlite3HexToInt(z[k]); } memcpy(pOut, &u, 8); - return (z[k]==0 && k-i<=16) ? 0 : 2; + if( k-i>16 ) return 2; + if( z[k]!=0 ) return 1; + return 0; }else #endif /* SQLITE_OMIT_HEX_INTEGER */ { - return sqlite3Atoi64(z, pOut, sqlite3Strlen30(z), SQLITE_UTF8); + int n = (int)(0x3fffffff&strspn(z,"+- \n\t0123456789")); + if( z[n] ) n++; + return sqlite3Atoi64(z, pOut, n, SQLITE_UTF8); } } @@ -34235,7 +35684,7 @@ SQLITE_PRIVATE int sqlite3GetInt32(const char *zNum, int *pValue){ u32 u = 0; zNum += 2; while( zNum[0]=='0' ) zNum++; - for(i=0; sqlite3Isxdigit(zNum[i]) && i<8; i++){ + for(i=0; i<8 && sqlite3Isxdigit(zNum[i]); i++){ u = u*16 + sqlite3HexToInt(zNum[i]); } if( (u&0x80000000)==0 && sqlite3Isxdigit(zNum[i])==0 ){ @@ -34282,6 +35731,153 @@ SQLITE_PRIVATE int sqlite3Atoi(const char *z){ return x; } +/* +** Decode a floating-point value into an approximate decimal +** representation. +** +** Round the decimal representation to n significant digits if +** n is positive. Or round to -n significant digits after the +** decimal point if n is negative. No rounding is performed if +** n is zero. +** +** The significant digits of the decimal representation are +** stored in p->z[] which is often (but not always) a pointer +** into the middle of p->zBuf[]. There are p->n significant digits. +** The p->z[] array is *not* zero-terminated. +*/ +SQLITE_PRIVATE void sqlite3FpDecode(FpDecode *p, double r, int iRound, int mxRound){ + int i; + u64 v; + int e, exp = 0; + p->isSpecial = 0; + p->z = p->zBuf; + + /* Convert negative numbers to positive. Deal with Infinity, 0.0, and + ** NaN. */ + if( r<0.0 ){ + p->sign = '-'; + r = -r; + }else if( r==0.0 ){ + p->sign = '+'; + p->n = 1; + p->iDP = 1; + p->z = "0"; + return; + }else{ + p->sign = '+'; + } + memcpy(&v,&r,8); + e = v>>52; + if( (e&0x7ff)==0x7ff ){ + p->isSpecial = 1 + (v!=0x7ff0000000000000LL); + p->n = 0; + p->iDP = 0; + return; + } + + /* Multiply r by powers of ten until it lands somewhere in between + ** 1.0e+17 and 1.0e+19.
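+**
+** Worked example (illustrative): decoding r == 0.25 scales the value
+** into that window as 2.5e+17, extracts the digit string "25" followed
+** by sixteen zeros, and, once the trailing zeros are stripped, leaves
+** p->z == "25", p->n == 2, and p->iDP == 0; that is, 0.25 is read back
+** as 0.25 * 10^0.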
+ */ + if( sqlite3Config.bUseLongDouble ){ + LONGDOUBLE_TYPE rr = r; + if( rr>=1.0e+19 ){ + while( rr>=1.0e+119L ){ exp+=100; rr *= 1.0e-100L; } + while( rr>=1.0e+29L ){ exp+=10; rr *= 1.0e-10L; } + while( rr>=1.0e+19L ){ exp++; rr *= 1.0e-1L; } + }else{ + while( rr<1.0e-97L ){ exp-=100; rr *= 1.0e+100L; } + while( rr<1.0e+07L ){ exp-=10; rr *= 1.0e+10L; } + while( rr<1.0e+17L ){ exp--; rr *= 1.0e+1L; } + } + v = (u64)rr; + }else{ + /* If high-precision floating point is not available using "long double", + ** then use Dekker-style double-double computation to increase the + ** precision. + ** + ** The error terms on constants like 1.0e+100 computed using the + ** decimal extension, for example as follows: + ** + ** SELECT decimal_exp(decimal_sub('1.0e+100',decimal(1.0e+100))); + */ + double rr[2]; + rr[0] = r; + rr[1] = 0.0; + if( rr[0]>9.223372036854774784e+18 ){ + while( rr[0]>9.223372036854774784e+118 ){ + exp += 100; + dekkerMul2(rr, 1.0e-100, -1.99918998026028836196e-117); + } + while( rr[0]>9.223372036854774784e+28 ){ + exp += 10; + dekkerMul2(rr, 1.0e-10, -3.6432197315497741579e-27); + } + while( rr[0]>9.223372036854774784e+18 ){ + exp += 1; + dekkerMul2(rr, 1.0e-01, -5.5511151231257827021e-18); + } + }else{ + while( rr[0]<9.223372036854774784e-83 ){ + exp -= 100; + dekkerMul2(rr, 1.0e+100, -1.5902891109759918046e+83); + } + while( rr[0]<9.223372036854774784e+07 ){ + exp -= 10; + dekkerMul2(rr, 1.0e+10, 0.0); + } + while( rr[0]<9.22337203685477478e+17 ){ + exp -= 1; + dekkerMul2(rr, 1.0e+01, 0.0); + } + } + v = rr[1]<0.0 ? (u64)rr[0]-(u64)(-rr[1]) : (u64)rr[0]+(u64)rr[1]; + } + + + /* Extract significant digits. */ + i = sizeof(p->zBuf)-1; + assert( v>0 ); + while( v ){ p->zBuf[i--] = (v%10) + '0'; v /= 10; } + assert( i>=0 && izBuf)-1 ); + p->n = sizeof(p->zBuf) - 1 - i; + assert( p->n>0 ); + assert( p->nzBuf) ); + p->iDP = p->n + exp; + if( iRound<=0 ){ + iRound = p->iDP - iRound; + if( iRound==0 && p->zBuf[i+1]>='5' ){ + iRound = 1; + p->zBuf[i--] = '0'; + p->n++; + p->iDP++; + } + } + if( iRound>0 && (iRoundn || p->n>mxRound) ){ + char *z = &p->zBuf[i+1]; + if( iRound>mxRound ) iRound = mxRound; + p->n = iRound; + if( z[iRound]>='5' ){ + int j = iRound-1; + while( 1 /*exit-by-break*/ ){ + z[j]++; + if( z[j]<='9' ) break; + z[j] = '0'; + if( j==0 ){ + p->z[i--] = '1'; + p->n++; + p->iDP++; + break; + }else{ + j--; + } + } + } + } + p->z = &p->zBuf[i+1]; + assert( i+p->n < sizeof(p->zBuf) ); + while( ALWAYS(p->n>0) && p->z[p->n-1]=='0' ){ p->n--; } +} + /* ** Try to convert z into an unsigned 32-bit integer. Return true on ** success and false if there is an error. @@ -34545,121 +36141,32 @@ SQLITE_PRIVATE u8 sqlite3GetVarint(const unsigned char *p, u64 *v){ ** this function assumes the single-byte case has already been handled. */ SQLITE_PRIVATE u8 sqlite3GetVarint32(const unsigned char *p, u32 *v){ - u32 a,b; + u64 v64; + u8 n; - /* The 1-byte case. Overwhelmingly the most common. 
Handled inline - ** by the getVarin32() macro */ - a = *p; - /* a: p0 (unmasked) */ -#ifndef getVarint32 - if (!(a&0x80)) - { - /* Values between 0 and 127 */ - *v = a; - return 1; - } -#endif + /* Assume that the single-byte case has already been handled by + ** the getVarint32() macro */ + assert( (p[0] & 0x80)!=0 ); - /* The 2-byte case */ - p++; - b = *p; - /* b: p1 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 128 and 16383 */ - a &= 0x7f; - a = a<<7; - *v = a | b; + if( (p[1] & 0x80)==0 ){ + /* This is the two-byte case */ + *v = ((p[0]&0x7f)<<7) | p[1]; return 2; } - - /* The 3-byte case */ - p++; - a = a<<14; - a |= *p; - /* a: p0<<14 | p2 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 16384 and 2097151 */ - a &= (0x7f<<14)|(0x7f); - b &= 0x7f; - b = b<<7; - *v = a | b; + if( (p[2] & 0x80)==0 ){ + /* This is the three-byte case */ + *v = ((p[0]&0x7f)<<14) | ((p[1]&0x7f)<<7) | p[2]; return 3; } - - /* A 32-bit varint is used to store size information in btrees. - ** Objects are rarely larger than 2MiB limit of a 3-byte varint. - ** A 3-byte varint is sufficient, for example, to record the size - ** of a 1048569-byte BLOB or string. - ** - ** We only unroll the first 1-, 2-, and 3- byte cases. The very - ** rare larger cases can be handled by the slower 64-bit varint - ** routine. - */ -#if 1 - { - u64 v64; - u8 n; - - n = sqlite3GetVarint(p-2, &v64); - assert( n>3 && n<=9 ); - if( (v64 & SQLITE_MAX_U32)!=v64 ){ - *v = 0xffffffff; - }else{ - *v = (u32)v64; - } - return n; - } - -#else - /* For following code (kept for historical record only) shows an - ** unrolling for the 3- and 4-byte varint cases. This code is - ** slightly faster, but it is also larger and much harder to test. - */ - p++; - b = b<<14; - b |= *p; - /* b: p1<<14 | p3 (unmasked) */ - if (!(b&0x80)) - { - /* Values between 2097152 and 268435455 */ - b &= (0x7f<<14)|(0x7f); - a &= (0x7f<<14)|(0x7f); - a = a<<7; - *v = a | b; - return 4; - } - - p++; - a = a<<14; - a |= *p; - /* a: p0<<28 | p2<<14 | p4 (unmasked) */ - if (!(a&0x80)) - { - /* Values between 268435456 and 34359738367 */ - a &= SLOT_4_2_0; - b &= SLOT_4_2_0; - b = b<<7; - *v = a | b; - return 5; - } - - /* We can only reach this point when reading a corrupt database - ** file. In that case we are not in any hurry. Use the (relatively - ** slow) general-purpose sqlite3GetVarint() routine to extract the - ** value. */ - { - u64 v64; - u8 n; - - p -= 4; - n = sqlite3GetVarint(p, &v64); - assert( n>5 && n<=9 ); + /* four or more bytes */ + n = sqlite3GetVarint(p, &v64); + assert( n>3 && n<=9 ); + if( (v64 & SQLITE_MAX_U32)!=v64 ){ + *v = 0xffffffff; + }else{ *v = (u32)v64; - return n; } -#endif + return n; } /* @@ -34812,7 +36319,7 @@ SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){ } /* -** Attempt to add, substract, or multiply the 64-bit signed value iB against +** Attempt to add, subtract, or multiply the 64-bit signed value iB against ** the other 64-bit signed integer at *pA and store the result in *pA. ** Return 0 on success. Or if the operation would have resulted in an ** overflow, leave *pA unchanged and return 1. @@ -35098,6 +36605,104 @@ SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nNam return 0; } +/* +** High-resolution hardware timer used for debugging and testing only. 
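+**
+** (Raw cycle counts of this kind are useful only for relative
+** comparisons on the same processor; they are not directly
+** convertible to wall-clock time, which is why this facility is
+** confined to profiling and testing builds.)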
+*/ +#if defined(VDBE_PROFILE) \ + || defined(SQLITE_PERFORMANCE_TRACE) \ + || defined(SQLITE_ENABLE_STMT_SCANSTATUS) +/************** Include hwtime.h in the middle of util.c *********************/ +/************** Begin file hwtime.h ******************************************/ +/* +** 2008 May 27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains inline asm code for retrieving "high-performance" +** counters for x86 and x86_64 class CPUs. +*/ +#ifndef SQLITE_HWTIME_H +#define SQLITE_HWTIME_H + +/* +** The following routine only works on Pentium-class (or newer) processors. +** It uses the RDTSC opcode to read the cycle count value out of the +** processor and returns that value. This can be used for high-res +** profiling. +*/ +#if !defined(__STRICT_ANSI__) && \ + (defined(__GNUC__) || defined(_MSC_VER)) && \ + (defined(i386) || defined(__i386__) || defined(_M_IX86)) + + #if defined(__GNUC__) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + + #elif defined(_MSC_VER) + + __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){ + __asm { + rdtsc + ret ; return value at EDX:EAX + } + } + + #endif + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned int lo, hi; + __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi)); + return (sqlite_uint64)hi << 32 | lo; + } + +#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__)) + + __inline__ sqlite_uint64 sqlite3Hwtime(void){ + unsigned long long retval; + unsigned long junk; + __asm__ __volatile__ ("\n\ + 1: mftbu %1\n\ + mftb %L0\n\ + mftbu %0\n\ + cmpw %0,%1\n\ + bne 1b" + : "=r" (retval), "=r" (junk)); + return retval; + } + +#else + + /* + ** asm() is needed for hardware timing support. Without asm(), + ** disable the sqlite3Hwtime() routine. + ** + ** sqlite3Hwtime() is only used for some obscure debugging + ** and analysis configurations, not in any deliverable, so this + ** should not be a great loss. + */ +SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); } + +#endif + +#endif /* !defined(SQLITE_HWTIME_H) */ + +/************** End of hwtime.h **********************************************/ +/************** Continuing where we left off in util.c ***********************/ +#endif + /************** End of util.c ************************************************/ /************** Begin file hash.c ********************************************/ /* @@ -35199,7 +36804,7 @@ static void insertElement( } -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. ** ** The hash table might fail to resize if sqlite3_malloc() fails or ** if the new size is the same as the prior size. 
@@ -35268,12 +36873,13 @@ static HashElem *findElementWithHash( count = pH->count; } if( pHash ) *pHash = h; - while( count-- ){ + while( count ){ assert( elem!=0 ); if( sqlite3StrICmp(elem->pKey,pKey)==0 ){ return elem; } elem = elem->next; + count--; } return &nullElement; } @@ -35392,48 +36998,48 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 5 */ "Vacuum" OpHelp(""), /* 6 */ "VFilter" OpHelp("iplan=r[P3] zplan='P4'"), /* 7 */ "VUpdate" OpHelp("data=r[P3@P2]"), - /* 8 */ "Goto" OpHelp(""), - /* 9 */ "Gosub" OpHelp(""), - /* 10 */ "InitCoroutine" OpHelp(""), - /* 11 */ "Yield" OpHelp(""), - /* 12 */ "MustBeInt" OpHelp(""), - /* 13 */ "Jump" OpHelp(""), - /* 14 */ "Once" OpHelp(""), - /* 15 */ "If" OpHelp(""), - /* 16 */ "IfNot" OpHelp(""), - /* 17 */ "IsNullOrType" OpHelp("if typeof(r[P1]) IN (P3,5) goto P2"), - /* 18 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"), + /* 8 */ "Init" OpHelp("Start at P2"), + /* 9 */ "Goto" OpHelp(""), + /* 10 */ "Gosub" OpHelp(""), + /* 11 */ "InitCoroutine" OpHelp(""), + /* 12 */ "Yield" OpHelp(""), + /* 13 */ "MustBeInt" OpHelp(""), + /* 14 */ "Jump" OpHelp(""), + /* 15 */ "Once" OpHelp(""), + /* 16 */ "If" OpHelp(""), + /* 17 */ "IfNot" OpHelp(""), + /* 18 */ "IsType" OpHelp("if typeof(P1.P3) in P5 goto P2"), /* 19 */ "Not" OpHelp("r[P2]= !r[P1]"), - /* 20 */ "SeekLT" OpHelp("key=r[P3@P4]"), - /* 21 */ "SeekLE" OpHelp("key=r[P3@P4]"), - /* 22 */ "SeekGE" OpHelp("key=r[P3@P4]"), - /* 23 */ "SeekGT" OpHelp("key=r[P3@P4]"), - /* 24 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"), - /* 25 */ "IfNoHope" OpHelp("key=r[P3@P4]"), - /* 26 */ "NoConflict" OpHelp("key=r[P3@P4]"), - /* 27 */ "NotFound" OpHelp("key=r[P3@P4]"), - /* 28 */ "Found" OpHelp("key=r[P3@P4]"), - /* 29 */ "SeekRowid" OpHelp("intkey=r[P3]"), - /* 30 */ "NotExists" OpHelp("intkey=r[P3]"), - /* 31 */ "Last" OpHelp(""), - /* 32 */ "IfSmaller" OpHelp(""), - /* 33 */ "SorterSort" OpHelp(""), - /* 34 */ "Sort" OpHelp(""), - /* 35 */ "Rewind" OpHelp(""), - /* 36 */ "SorterNext" OpHelp(""), - /* 37 */ "Prev" OpHelp(""), - /* 38 */ "Next" OpHelp(""), - /* 39 */ "IdxLE" OpHelp("key=r[P3@P4]"), - /* 40 */ "IdxGT" OpHelp("key=r[P3@P4]"), - /* 41 */ "IdxLT" OpHelp("key=r[P3@P4]"), - /* 42 */ "IdxGE" OpHelp("key=r[P3@P4]"), + /* 20 */ "IfNullRow" OpHelp("if P1.nullRow then r[P3]=NULL, goto P2"), + /* 21 */ "SeekLT" OpHelp("key=r[P3@P4]"), + /* 22 */ "SeekLE" OpHelp("key=r[P3@P4]"), + /* 23 */ "SeekGE" OpHelp("key=r[P3@P4]"), + /* 24 */ "SeekGT" OpHelp("key=r[P3@P4]"), + /* 25 */ "IfNotOpen" OpHelp("if( !csr[P1] ) goto P2"), + /* 26 */ "IfNoHope" OpHelp("key=r[P3@P4]"), + /* 27 */ "NoConflict" OpHelp("key=r[P3@P4]"), + /* 28 */ "NotFound" OpHelp("key=r[P3@P4]"), + /* 29 */ "Found" OpHelp("key=r[P3@P4]"), + /* 30 */ "SeekRowid" OpHelp("intkey=r[P3]"), + /* 31 */ "NotExists" OpHelp("intkey=r[P3]"), + /* 32 */ "Last" OpHelp(""), + /* 33 */ "IfSmaller" OpHelp(""), + /* 34 */ "SorterSort" OpHelp(""), + /* 35 */ "Sort" OpHelp(""), + /* 36 */ "Rewind" OpHelp(""), + /* 37 */ "SorterNext" OpHelp(""), + /* 38 */ "Prev" OpHelp(""), + /* 39 */ "Next" OpHelp(""), + /* 40 */ "IdxLE" OpHelp("key=r[P3@P4]"), + /* 41 */ "IdxGT" OpHelp("key=r[P3@P4]"), + /* 42 */ "IdxLT" OpHelp("key=r[P3@P4]"), /* 43 */ "Or" OpHelp("r[P3]=(r[P1] || r[P2])"), /* 44 */ "And" OpHelp("r[P3]=(r[P1] && r[P2])"), - /* 45 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), - /* 46 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), - /* 47 */ "Program" OpHelp(""), - /* 48 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), - 
/* 49 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 45 */ "IdxGE" OpHelp("key=r[P3@P4]"), + /* 46 */ "RowSetRead" OpHelp("r[P3]=rowset(P1)"), + /* 47 */ "RowSetTest" OpHelp("if r[P3] in rowset(P1) goto P2"), + /* 48 */ "Program" OpHelp(""), + /* 49 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"), /* 50 */ "IsNull" OpHelp("if r[P1]==NULL goto P2"), /* 51 */ "NotNull" OpHelp("if r[P1]!=NULL goto P2"), /* 52 */ "Ne" OpHelp("IF r[P3]!=r[P1]"), @@ -35443,12 +37049,12 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 56 */ "Lt" OpHelp("IF r[P3]=r[P1]"), /* 58 */ "ElseEq" OpHelp(""), - /* 59 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), - /* 60 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), - /* 61 */ "IncrVacuum" OpHelp(""), - /* 62 */ "VNext" OpHelp(""), - /* 63 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"), - /* 64 */ "Init" OpHelp("Start at P2"), + /* 59 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"), + /* 60 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"), + /* 61 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"), + /* 62 */ "IncrVacuum" OpHelp(""), + /* 63 */ "VNext" OpHelp(""), + /* 64 */ "Filter" OpHelp("if key(P3@P4) not in filter(P1) goto P2"), /* 65 */ "PureFunc" OpHelp("r[P3]=func(r[P2@NP])"), /* 66 */ "Function" OpHelp("r[P3]=func(r[P2@NP])"), /* 67 */ "Return" OpHelp(""), @@ -35558,25 +37164,1010 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* 171 */ "VCreate" OpHelp(""), /* 172 */ "VDestroy" OpHelp(""), /* 173 */ "VOpen" OpHelp(""), - /* 174 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), - /* 175 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), - /* 176 */ "VRename" OpHelp(""), - /* 177 */ "Pagecount" OpHelp(""), - /* 178 */ "MaxPgcnt" OpHelp(""), - /* 179 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), - /* 180 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), - /* 181 */ "Trace" OpHelp(""), - /* 182 */ "CursorHint" OpHelp(""), - /* 183 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), - /* 184 */ "Noop" OpHelp(""), - /* 185 */ "Explain" OpHelp(""), - /* 186 */ "Abortable" OpHelp(""), + /* 174 */ "VCheck" OpHelp(""), + /* 175 */ "VInitIn" OpHelp("r[P2]=ValueList(P1,P3)"), + /* 176 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"), + /* 177 */ "VRename" OpHelp(""), + /* 178 */ "Pagecount" OpHelp(""), + /* 179 */ "MaxPgcnt" OpHelp(""), + /* 180 */ "ClrSubtype" OpHelp("r[P1].subtype = 0"), + /* 181 */ "GetSubtype" OpHelp("r[P2] = r[P1].subtype"), + /* 182 */ "SetSubtype" OpHelp("r[P2].subtype = r[P1]"), + /* 183 */ "FilterAdd" OpHelp("filter(P1) += key(P3@P4)"), + /* 184 */ "Trace" OpHelp(""), + /* 185 */ "CursorHint" OpHelp(""), + /* 186 */ "ReleaseReg" OpHelp("release r[P1@P2] mask P3"), + /* 187 */ "Noop" OpHelp(""), + /* 188 */ "Explain" OpHelp(""), + /* 189 */ "Abortable" OpHelp(""), }; return azName[i]; } #endif /************** End of opcodes.c *********************************************/ +/************** Begin file os_kv.c *******************************************/ +/* +** 2022-09-06 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +****************************************************************************** +** +** This file contains an experimental VFS layer that operates on a +** Key/Value storage engine where both keys and values must be pure +** text. 
+*/ +/* #include */ +#if SQLITE_OS_KV || (SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL)) + +/***************************************************************************** +** Debugging logic +*/ + +/* SQLITE_KV_TRACE() is used for tracing calls to kvstorage routines. */ +#if 0 +#define SQLITE_KV_TRACE(X) printf X +#else +#define SQLITE_KV_TRACE(X) +#endif + +/* SQLITE_KV_LOG() is used for tracing calls to the VFS interface */ +#if 0 +#define SQLITE_KV_LOG(X) printf X +#else +#define SQLITE_KV_LOG(X) +#endif + + +/* +** Forward declaration of objects used by this VFS implementation +*/ +typedef struct KVVfsFile KVVfsFile; + +/* A single open file. There are only two files represented by this +** VFS - the database and the rollback journal. +*/ +struct KVVfsFile { + sqlite3_file base; /* IO methods */ + const char *zClass; /* Storage class */ + int isJournal; /* True if this is a journal file */ + unsigned int nJrnl; /* Space allocated for aJrnl[] */ + char *aJrnl; /* Journal content */ + int szPage; /* Last known page size */ + sqlite3_int64 szDb; /* Database file size. -1 means unknown */ + char *aData; /* Buffer to hold page data */ +}; +#define SQLITE_KVOS_SZ 133073 + +/* +** Methods for KVVfsFile +*/ +static int kvvfsClose(sqlite3_file*); +static int kvvfsReadDb(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int kvvfsReadJrnl(sqlite3_file*, void*, int iAmt, sqlite3_int64 iOfst); +static int kvvfsWriteDb(sqlite3_file*,const void*,int iAmt, sqlite3_int64); +static int kvvfsWriteJrnl(sqlite3_file*,const void*,int iAmt, sqlite3_int64); +static int kvvfsTruncateDb(sqlite3_file*, sqlite3_int64 size); +static int kvvfsTruncateJrnl(sqlite3_file*, sqlite3_int64 size); +static int kvvfsSyncDb(sqlite3_file*, int flags); +static int kvvfsSyncJrnl(sqlite3_file*, int flags); +static int kvvfsFileSizeDb(sqlite3_file*, sqlite3_int64 *pSize); +static int kvvfsFileSizeJrnl(sqlite3_file*, sqlite3_int64 *pSize); +static int kvvfsLock(sqlite3_file*, int); +static int kvvfsUnlock(sqlite3_file*, int); +static int kvvfsCheckReservedLock(sqlite3_file*, int *pResOut); +static int kvvfsFileControlDb(sqlite3_file*, int op, void *pArg); +static int kvvfsFileControlJrnl(sqlite3_file*, int op, void *pArg); +static int kvvfsSectorSize(sqlite3_file*); +static int kvvfsDeviceCharacteristics(sqlite3_file*); + +/* +** Methods for sqlite3_vfs +*/ +static int kvvfsOpen(sqlite3_vfs*, const char *, sqlite3_file*, int , int *); +static int kvvfsDelete(sqlite3_vfs*, const char *zName, int syncDir); +static int kvvfsAccess(sqlite3_vfs*, const char *zName, int flags, int *); +static int kvvfsFullPathname(sqlite3_vfs*, const char *zName, int, char *zOut); +static void *kvvfsDlOpen(sqlite3_vfs*, const char *zFilename); +static int kvvfsRandomness(sqlite3_vfs*, int nByte, char *zOut); +static int kvvfsSleep(sqlite3_vfs*, int microseconds); +static int kvvfsCurrentTime(sqlite3_vfs*, double*); +static int kvvfsCurrentTimeInt64(sqlite3_vfs*, sqlite3_int64*); + +static sqlite3_vfs sqlite3OsKvvfsObject = { + 1, /* iVersion */ + sizeof(KVVfsFile), /* szOsFile */ + 1024, /* mxPathname */ + 0, /* pNext */ + "kvvfs", /* zName */ + 0, /* pAppData */ + kvvfsOpen, /* xOpen */ + kvvfsDelete, /* xDelete */ + kvvfsAccess, /* xAccess */ + kvvfsFullPathname, /* xFullPathname */ + kvvfsDlOpen, /* xDlOpen */ + 0, /* xDlError */ + 0, /* xDlSym */ + 0, /* xDlClose */ + kvvfsRandomness, /* xRandomness */ + kvvfsSleep, /* xSleep */ + kvvfsCurrentTime, /* xCurrentTime */ + 0, /* xGetLastError */ + kvvfsCurrentTimeInt64 /* 
xCurrentTimeInt64 */ +}; + +/* Methods for sqlite3_file objects referencing a database file +*/ +static sqlite3_io_methods kvvfs_db_io_methods = { + 1, /* iVersion */ + kvvfsClose, /* xClose */ + kvvfsReadDb, /* xRead */ + kvvfsWriteDb, /* xWrite */ + kvvfsTruncateDb, /* xTruncate */ + kvvfsSyncDb, /* xSync */ + kvvfsFileSizeDb, /* xFileSize */ + kvvfsLock, /* xLock */ + kvvfsUnlock, /* xUnlock */ + kvvfsCheckReservedLock, /* xCheckReservedLock */ + kvvfsFileControlDb, /* xFileControl */ + kvvfsSectorSize, /* xSectorSize */ + kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + 0, /* xFetch */ + 0 /* xUnfetch */ +}; + +/* Methods for sqlite3_file objects referencing a rollback journal +*/ +static sqlite3_io_methods kvvfs_jrnl_io_methods = { + 1, /* iVersion */ + kvvfsClose, /* xClose */ + kvvfsReadJrnl, /* xRead */ + kvvfsWriteJrnl, /* xWrite */ + kvvfsTruncateJrnl, /* xTruncate */ + kvvfsSyncJrnl, /* xSync */ + kvvfsFileSizeJrnl, /* xFileSize */ + kvvfsLock, /* xLock */ + kvvfsUnlock, /* xUnlock */ + kvvfsCheckReservedLock, /* xCheckReservedLock */ + kvvfsFileControlJrnl, /* xFileControl */ + kvvfsSectorSize, /* xSectorSize */ + kvvfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, /* xShmMap */ + 0, /* xShmLock */ + 0, /* xShmBarrier */ + 0, /* xShmUnmap */ + 0, /* xFetch */ + 0 /* xUnfetch */ +}; + +/****** Storage subsystem **************************************************/ +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> + +/* Forward declarations for the low-level storage engine +*/ +static int kvstorageWrite(const char*, const char *zKey, const char *zData); +static int kvstorageDelete(const char*, const char *zKey); +static int kvstorageRead(const char*, const char *zKey, char *zBuf, int nBuf); +#define KVSTORAGE_KEY_SZ 32 + +/* Expand the key name with an appropriate prefix and put the result +** in zKeyOut[]. The zKeyOut[] buffer is assumed to hold at least +** KVSTORAGE_KEY_SZ bytes. +*/ +static void kvstorageMakeKey( + const char *zClass, + const char *zKeyIn, + char *zKeyOut +){ + sqlite3_snprintf(KVSTORAGE_KEY_SZ, zKeyOut, "kvvfs-%s-%s", zClass, zKeyIn); +} + +/* Write content into a key. zClass is the particular namespace of the +** underlying key/value store to use - either "local" or "session". +** +** Both zKey and zData are zero-terminated pure text strings. +** +** Return the number of errors. +*/ +static int kvstorageWrite( + const char *zClass, + const char *zKey, + const char *zData +){ + FILE *fd; + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + fd = fopen(zXKey, "wb"); + if( fd ){ + SQLITE_KV_TRACE(("KVVFS-WRITE %-15s (%d) %.50s%s\n", zXKey, + (int)strlen(zData), zData, + strlen(zData)>50 ? "..." : "")); + fputs(zData, fd); + fclose(fd); + return 0; + }else{ + return 1; + } +} + +/* Delete a key (with its corresponding data) from the key/value +** namespace given by zClass. If the key does not previously exist, +** this routine is a no-op. +*/ +static int kvstorageDelete(const char *zClass, const char *zKey){ + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + unlink(zXKey); + SQLITE_KV_TRACE(("KVVFS-DELETE %-15s\n", zXKey)); + return 0; +} + +/* Read the value associated with a zKey from the key/value namespace given +** by zClass and put the text data associated with that key in the first +** nBuf bytes of zBuf[]. The value might be truncated if zBuf is not large +** enough to hold it all.
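+** (Truncation keeps only the first nBuf-1 bytes of the stored text,
+** leaving room for the terminator.)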
The value put into zBuf must always be zero +** terminated, even if it gets truncated because nBuf is not large enough. +** +** Return the total number of bytes in the data, without truncation, and +** not counting the final zero terminator. Return -1 if the key does +** not exist. +** +** If nBuf<=0 then this routine simply returns the size of the data without +** actually reading it. +*/ +static int kvstorageRead( + const char *zClass, + const char *zKey, + char *zBuf, + int nBuf +){ + FILE *fd; + struct stat buf; + char zXKey[KVSTORAGE_KEY_SZ]; + kvstorageMakeKey(zClass, zKey, zXKey); + if( access(zXKey, R_OK)!=0 + || stat(zXKey, &buf)!=0 + || !S_ISREG(buf.st_mode) + ){ + SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey)); + return -1; + } + if( nBuf<=0 ){ + return (int)buf.st_size; + }else if( nBuf==1 ){ + zBuf[0] = 0; + SQLITE_KV_TRACE(("KVVFS-READ %-15s (%d)\n", zXKey, + (int)buf.st_size)); + return (int)buf.st_size; + } + if( nBuf > buf.st_size + 1 ){ + nBuf = buf.st_size + 1; + } + fd = fopen(zXKey, "rb"); + if( fd==0 ){ + SQLITE_KV_TRACE(("KVVFS-READ %-15s (-1)\n", zXKey)); + return -1; + }else{ + sqlite3_int64 n = fread(zBuf, 1, nBuf-1, fd); + fclose(fd); + zBuf[n] = 0; + SQLITE_KV_TRACE(("KVVFS-READ %-15s (%lld) %.50s%s\n", zXKey, + n, zBuf, n>50 ? "..." : "")); + return (int)n; + } +} + +/* +** An internal level of indirection which enables us to replace the +** kvvfs i/o methods with JavaScript implementations in WASM builds. +** Maintenance reminder: if this struct changes in any way, the JSON +** rendering of its structure must be updated in +** sqlite3_wasm_enum_json(). There are no binary compatibility +** concerns, so it does not need an iVersion member. This file is +** necessarily always compiled together with sqlite3_wasm_enum_json(), +** and JS code dynamically creates the mapping of members based on +** that JSON description. +*/ +typedef struct sqlite3_kvvfs_methods sqlite3_kvvfs_methods; +struct sqlite3_kvvfs_methods { + int (*xRead)(const char *zClass, const char *zKey, char *zBuf, int nBuf); + int (*xWrite)(const char *zClass, const char *zKey, const char *zData); + int (*xDelete)(const char *zClass, const char *zKey); + const int nKeySize; +}; + +/* +** This object holds the kvvfs I/O methods which may be swapped out +** for JavaScript-side implementations in WASM builds. In such builds +** it cannot be const, but in native builds it should be so that +** the compiler can hopefully optimize this level of indirection out. +** That said, kvvfs is intended primarily for use in WASM builds. +** +** Note that this is not explicitly flagged as static because the +** amalgamation build will tag it with SQLITE_PRIVATE. +*/ +#ifndef SQLITE_WASM +const +#endif +SQLITE_PRIVATE sqlite3_kvvfs_methods sqlite3KvvfsMethods = { +kvstorageRead, +kvstorageWrite, +kvstorageDelete, +KVSTORAGE_KEY_SZ +}; + +/****** Utility subroutines ************************************************/ + +/* +** Encode binary into the text encoded used to persist on disk. +** The output text is stored in aOut[], which must be at least +** nData+1 bytes in length. +** +** Return the actual length of the encoded text, not counting the +** zero terminator at the end. +** +** Encoding format +** --------------- +** +** * Non-zero bytes are encoded as upper-case hexadecimal +** +** * A sequence of one or more zero-bytes that are not at the +** beginning of the buffer are encoded as a little-endian +** base-26 number using a..z. "a" means 0. "b" means 1, +** "z" means 25. "ab" means 26. "ac" means 52. 
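+** As a worked check, "db" means 3 + 1*26 = 29, the first character
+** being the least significant digit.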
And so forth. +** +** * Because there is no overlap between the encoding characters +** of hexadecimal and base-26 numbers, it is always clear where +** one stops and the next begins. +*/ +static int kvvfsEncode(const char *aData, int nData, char *aOut){ + int i, j; + const unsigned char *a = (const unsigned char*)aData; + for(i=j=0; i<nData; i++){ + unsigned char c = a[i]; + if( c!=0 ){ + aOut[j++] = "0123456789ABCDEF"[c>>4]; + aOut[j++] = "0123456789ABCDEF"[c&0xf]; + }else{ + /* A sequence of 1 or more zeros is stored as a little-endian + ** base-26 number using a..z as the digits. So one zero is "b". + ** Two zeros is "c". 25 zeros is "z", 26 zeros is "ab", 27 is "bb", + ** and so forth. + */ + int k; + for(k=1; i+k<nData && a[i+k]==0; k++){} + i += k-1; + while( k>0 ){ + aOut[j++] = 'a'+(k%26); + k /= 26; + } + } + } + aOut[j] = 0; + return j; +} + +static const signed char kvvfsHexValue[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 +}; + +/* +** Decode the text encoding back to binary. The binary content is +** written into aOut, which must be at least nOut bytes in length. +** +** The return value is the number of bytes actually written into aOut[]. +*/ +static int kvvfsDecode(const char *a, char *aOut, int nOut){ + int i, j; + int c; + const unsigned char *aIn = (const unsigned char*)a; + i = 0; + j = 0; + while( 1 ){ + c = kvvfsHexValue[aIn[i]]; + if( c<0 ){ + int n = 0; + int mult = 1; + c = aIn[i]; + if( c==0 ) break; + while( c>='a' && c<='z' ){ + n += (c - 'a')*mult; + mult *= 26; + c = aIn[++i]; + } + if( j+n>nOut ) return -1; + memset(&aOut[j], 0, n); + j += n; + if( c==0 || mult==1 ) break; /* progress stalled if mult==1 */ + }else{ + aOut[j] = c<<4; + c = kvvfsHexValue[aIn[++i]]; + if( c<0 ) break; + aOut[j++] += c; + i++; + } + } + return j; +} + +/* +** Decode a complete journal file. Allocate space in pFile->aJrnl +** and store the decoding there. Or leave pFile->aJrnl set to NULL +** if an error is encountered. +** +** The first few characters of the text encoding will be a little-endian +** base-26 number (digits a..z) that is the total number of bytes +** in the decoded journal file image. This base-26 number is followed +** by a single space, then the encoding of the journal. The space +** separator is required to act as a terminator for the base-26 number. +*/ +static void kvvfsDecodeJournal( + KVVfsFile *pFile, /* Store decoding in pFile->aJrnl */ + const char *zTxt, /* Text encoding.
Zero-terminated */ + int nTxt /* Bytes in zTxt, excluding zero terminator */ +){ + unsigned int n = 0; + int c, i, mult; + i = 0; + mult = 1; + while( (c = zTxt[i++])>='a' && c<='z' ){ + n += (c - 'a')*mult; + mult *= 26; + } + sqlite3_free(pFile->aJrnl); + pFile->aJrnl = sqlite3_malloc64( n ); + if( pFile->aJrnl==0 ){ + pFile->nJrnl = 0; + return; + } + pFile->nJrnl = n; + n = kvvfsDecode(zTxt+i, pFile->aJrnl, pFile->nJrnl); + if( n<pFile->nJrnl ){ + sqlite3_free(pFile->aJrnl); + pFile->aJrnl = 0; + pFile->nJrnl = 0; + } +} + +/* +** Read or write the "sz" element, containing the database file size. +*/ +static sqlite3_int64 kvvfsReadFileSize(KVVfsFile *pFile){ + char zData[50]; + zData[0] = 0; + sqlite3KvvfsMethods.xRead(pFile->zClass, "sz", zData, sizeof(zData)-1); + return strtoll(zData, 0, 0); +} +static int kvvfsWriteFileSize(KVVfsFile *pFile, sqlite3_int64 sz){ + char zData[50]; + sqlite3_snprintf(sizeof(zData), zData, "%lld", sz); + return sqlite3KvvfsMethods.xWrite(pFile->zClass, "sz", zData); +} + +/****** sqlite3_io_methods methods ******************************************/ + +/* +** Close an kvvfs-file. +*/ +static int kvvfsClose(sqlite3_file *pProtoFile){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + + SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass, + pFile->isJournal ? "journal" : "db")); + sqlite3_free(pFile->aJrnl); + sqlite3_free(pFile->aData); + return SQLITE_OK; +} + +/* +** Read from the -journal file. +*/ +static int kvvfsReadJrnl( + sqlite3_file *pProtoFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + assert( pFile->isJournal ); + SQLITE_KV_LOG(("xRead('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); + if( pFile->aJrnl==0 ){ + int szTxt = kvstorageRead(pFile->zClass, "jrnl", 0, 0); + char *aTxt; + if( szTxt<=4 ){ + return SQLITE_IOERR; + } + aTxt = sqlite3_malloc64( szTxt+1 ); + if( aTxt==0 ) return SQLITE_NOMEM; + kvstorageRead(pFile->zClass, "jrnl", aTxt, szTxt+1); + kvvfsDecodeJournal(pFile, aTxt, szTxt); + sqlite3_free(aTxt); + if( pFile->aJrnl==0 ) return SQLITE_IOERR; + } + if( iOfst+iAmt>pFile->nJrnl ){ + return SQLITE_IOERR_SHORT_READ; + } + memcpy(zBuf, pFile->aJrnl+iOfst, iAmt); + return SQLITE_OK; +} + +/* +** Read from the database file.
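+**
+** Each page lives under its own key, named by page number. With a
+** 4096-byte page size, for instance, a read at offset 8192 is page
+** 1 + 8192/4096 = 3, which for the "local" class resolves through
+** kvstorageMakeKey() to the key "kvvfs-local-3".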
+*/ +static int kvvfsReadDb( + sqlite3_file *pProtoFile, + void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + unsigned int pgno; + int got, n; + char zKey[30]; + char *aData = pFile->aData; + assert( iOfst>=0 ); + assert( iAmt>=0 ); + SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); + if( iOfst+iAmt>=512 ){ + if( (iOfst % iAmt)!=0 ){ + return SQLITE_IOERR_READ; + } + if( (iAmt & (iAmt-1))!=0 || iAmt<512 || iAmt>65536 ){ + return SQLITE_IOERR_READ; + } + pFile->szPage = iAmt; + pgno = 1 + iOfst/iAmt; + }else{ + pgno = 1; + } + sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno); + got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, + aData, SQLITE_KVOS_SZ-1); + if( got<0 ){ + n = 0; + }else{ + aData[got] = 0; + if( iOfst+iAmt<512 ){ + int k = iOfst+iAmt; + aData[k*2] = 0; + n = kvvfsDecode(aData, &aData[2000], SQLITE_KVOS_SZ-2000); + if( n>=iOfst+iAmt ){ + memcpy(zBuf, &aData[2000+iOfst], iAmt); + n = iAmt; + }else{ + n = 0; + } + }else{ + n = kvvfsDecode(aData, zBuf, iAmt); + } + } + if( n<iAmt ){ + memset(zBuf+n, 0, iAmt-n); + return SQLITE_IOERR_SHORT_READ; + } + return SQLITE_OK; +} + +/* +** Write into the -journal file. +*/ +static int kvvfsWriteJrnl( + sqlite3_file *pProtoFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + sqlite3_int64 iEnd = iOfst+iAmt; + SQLITE_KV_LOG(("xWrite('%s-journal',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); + if( iEnd>=0x10000000 ) return SQLITE_FULL; + if( pFile->aJrnl==0 || pFile->nJrnl<iEnd ){ + char *aNew = sqlite3_realloc(pFile->aJrnl, iEnd); + if( aNew==0 ){ + return SQLITE_IOERR_NOMEM; + } + pFile->aJrnl = aNew; + if( pFile->nJrnl<iOfst ){ + memset(pFile->aJrnl+pFile->nJrnl, 0, iOfst-pFile->nJrnl); + } + pFile->nJrnl = iEnd; + } + memcpy(pFile->aJrnl+iOfst, zBuf, iAmt); + return SQLITE_OK; +} + +/* +** Write into the database file. +*/ +static int kvvfsWriteDb( + sqlite3_file *pProtoFile, + const void *zBuf, + int iAmt, + sqlite_int64 iOfst +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + unsigned int pgno; + char zKey[30]; + char *aData = pFile->aData; + SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst)); + assert( iAmt>=512 && iAmt<=65536 ); + assert( (iAmt & (iAmt-1))==0 ); + assert( pFile->szPage<0 || pFile->szPage==iAmt ); + pFile->szPage = iAmt; + pgno = 1 + iOfst/iAmt; + sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno); + kvvfsEncode(zBuf, iAmt, aData); + if( sqlite3KvvfsMethods.xWrite(pFile->zClass, zKey, aData) ){ + return SQLITE_IOERR; + } + if( iOfst+iAmt > pFile->szDb ){ + pFile->szDb = iOfst + iAmt; + } + return SQLITE_OK; +} + +/* +** Truncate an kvvfs-file. +*/ +static int kvvfsTruncateJrnl(sqlite3_file *pProtoFile, sqlite_int64 size){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + SQLITE_KV_LOG(("xTruncate('%s-journal',%lld)\n", pFile->zClass, size)); + assert( size==0 ); + sqlite3KvvfsMethods.xDelete(pFile->zClass, "jrnl"); + sqlite3_free(pFile->aJrnl); + pFile->aJrnl = 0; + pFile->nJrnl = 0; + return SQLITE_OK; +} +static int kvvfsTruncateDb(sqlite3_file *pProtoFile, sqlite_int64 size){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + if( pFile->szDb>size + && pFile->szPage>0 + && (size % pFile->szPage)==0 + ){ + char zKey[50]; + unsigned int pgno, pgnoMax; + SQLITE_KV_LOG(("xTruncate('%s-db',%lld)\n", pFile->zClass, size)); + pgno = 1 + size/pFile->szPage; + pgnoMax = 2 + pFile->szDb/pFile->szPage; + while( pgno<=pgnoMax ){ + sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno); + sqlite3KvvfsMethods.xDelete(pFile->zClass, zKey); + pgno++; + } + pFile->szDb = size; + return kvvfsWriteFileSize(pFile, size) ? SQLITE_IOERR : SQLITE_OK; + } + return SQLITE_IOERR; +} + +/* +** Sync an kvvfs-file.
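+**
+** For the journal this writes a single "jrnl" value: the decoded
+** journal size as a little-endian base-26 number, one space, then the
+** encoded body. A 3-byte journal 01 00 02 (hex), for example, is
+** stored as "d 01b02".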
+*/ +static int kvvfsSyncJrnl(sqlite3_file *pProtoFile, int flags){ + int i, n; + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + char *zOut; + SQLITE_KV_LOG(("xSync('%s-journal')\n", pFile->zClass)); + if( pFile->nJrnl<=0 ){ + return kvvfsTruncateJrnl(pProtoFile, 0); + } + zOut = sqlite3_malloc64( pFile->nJrnl*2 + 50 ); + if( zOut==0 ){ + return SQLITE_IOERR_NOMEM; + } + n = pFile->nJrnl; + i = 0; + do{ + zOut[i++] = 'a' + (n%26); + n /= 26; + }while( n>0 ); + zOut[i++] = ' '; + kvvfsEncode(pFile->aJrnl, pFile->nJrnl, &zOut[i]); + i = sqlite3KvvfsMethods.xWrite(pFile->zClass, "jrnl", zOut); + sqlite3_free(zOut); + return i ? SQLITE_IOERR : SQLITE_OK; +} +static int kvvfsSyncDb(sqlite3_file *pProtoFile, int flags){ + return SQLITE_OK; +} + +/* +** Return the current file-size of an kvvfs-file. +*/ +static int kvvfsFileSizeJrnl(sqlite3_file *pProtoFile, sqlite_int64 *pSize){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + SQLITE_KV_LOG(("xFileSize('%s-journal')\n", pFile->zClass)); + *pSize = pFile->nJrnl; + return SQLITE_OK; +} +static int kvvfsFileSizeDb(sqlite3_file *pProtoFile, sqlite_int64 *pSize){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + SQLITE_KV_LOG(("xFileSize('%s-db')\n", pFile->zClass)); + if( pFile->szDb>=0 ){ + *pSize = pFile->szDb; + }else{ + *pSize = kvvfsReadFileSize(pFile); + } + return SQLITE_OK; +} + +/* +** Lock an kvvfs-file. +*/ +static int kvvfsLock(sqlite3_file *pProtoFile, int eLock){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + assert( !pFile->isJournal ); + SQLITE_KV_LOG(("xLock(%s,%d)\n", pFile->zClass, eLock)); + + if( eLock!=SQLITE_LOCK_NONE ){ + pFile->szDb = kvvfsReadFileSize(pFile); + } + return SQLITE_OK; +} + +/* +** Unlock an kvvfs-file. +*/ +static int kvvfsUnlock(sqlite3_file *pProtoFile, int eLock){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + assert( !pFile->isJournal ); + SQLITE_KV_LOG(("xUnlock(%s,%d)\n", pFile->zClass, eLock)); + if( eLock==SQLITE_LOCK_NONE ){ + pFile->szDb = -1; + } + return SQLITE_OK; +} + +/* +** Check if another file-handle holds a RESERVED lock on an kvvfs-file. +*/ +static int kvvfsCheckReservedLock(sqlite3_file *pProtoFile, int *pResOut){ + SQLITE_KV_LOG(("xCheckReservedLock\n")); + *pResOut = 0; + return SQLITE_OK; +} + +/* +** File control method. For custom operations on an kvvfs-file. +*/ +static int kvvfsFileControlJrnl(sqlite3_file *pProtoFile, int op, void *pArg){ + SQLITE_KV_LOG(("xFileControl(%d) on journal\n", op)); + return SQLITE_NOTFOUND; +} +static int kvvfsFileControlDb(sqlite3_file *pProtoFile, int op, void *pArg){ + SQLITE_KV_LOG(("xFileControl(%d) on database\n", op)); + if( op==SQLITE_FCNTL_SYNC ){ + KVVfsFile *pFile = (KVVfsFile *)pProtoFile; + int rc = SQLITE_OK; + SQLITE_KV_LOG(("xSync('%s-db')\n", pFile->zClass)); + if( pFile->szDb>0 && 0!=kvvfsWriteFileSize(pFile, pFile->szDb) ){ + rc = SQLITE_IOERR; + } + return rc; + } + return SQLITE_NOTFOUND; +} + +/* +** Return the sector-size in bytes for an kvvfs-file. +*/ +static int kvvfsSectorSize(sqlite3_file *pFile){ + return 512; +} + +/* +** Return the device characteristic flags supported by an kvvfs-file. +*/ +static int kvvfsDeviceCharacteristics(sqlite3_file *pProtoFile){ + return 0; +} + +/****** sqlite3_vfs methods *************************************************/ + +/* +** Open an kvvfs file handle. 
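+**
+** Only four names are accepted, and the name picks both the storage
+** class and the role of the handle:
+**
+**     "local"            ->  database file, class "local"
+**     "session"          ->  database file, class "session"
+**     "local-journal"    ->  rollback journal, class "local"
+**     "session-journal"  ->  rollback journal, class "session"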
+*/ +static int kvvfsOpen( + sqlite3_vfs *pProtoVfs, + const char *zName, + sqlite3_file *pProtoFile, + int flags, + int *pOutFlags +){ + KVVfsFile *pFile = (KVVfsFile*)pProtoFile; + if( zName==0 ) zName = ""; + SQLITE_KV_LOG(("xOpen(\"%s\")\n", zName)); + if( strcmp(zName, "local")==0 + || strcmp(zName, "session")==0 + ){ + pFile->isJournal = 0; + pFile->base.pMethods = &kvvfs_db_io_methods; + }else + if( strcmp(zName, "local-journal")==0 + || strcmp(zName, "session-journal")==0 + ){ + pFile->isJournal = 1; + pFile->base.pMethods = &kvvfs_jrnl_io_methods; + }else{ + return SQLITE_CANTOPEN; + } + if( zName[0]=='s' ){ + pFile->zClass = "session"; + }else{ + pFile->zClass = "local"; + } + pFile->aData = sqlite3_malloc64(SQLITE_KVOS_SZ); + if( pFile->aData==0 ){ + return SQLITE_NOMEM; + } + pFile->aJrnl = 0; + pFile->nJrnl = 0; + pFile->szPage = -1; + pFile->szDb = -1; + return SQLITE_OK; +} + +/* +** Delete the file located at zPath. If the dirSync argument is true, +** ensure the file-system modifications are synced to disk before +** returning. +*/ +static int kvvfsDelete(sqlite3_vfs *pVfs, const char *zPath, int dirSync){ + if( strcmp(zPath, "local-journal")==0 ){ + sqlite3KvvfsMethods.xDelete("local", "jrnl"); + }else + if( strcmp(zPath, "session-journal")==0 ){ + sqlite3KvvfsMethods.xDelete("session", "jrnl"); + } + return SQLITE_OK; +} + +/* +** Test for access permissions. Return true if the requested permission +** is available, or false otherwise. +*/ +static int kvvfsAccess( + sqlite3_vfs *pProtoVfs, + const char *zPath, + int flags, + int *pResOut +){ + SQLITE_KV_LOG(("xAccess(\"%s\")\n", zPath)); + if( strcmp(zPath, "local-journal")==0 ){ + *pResOut = sqlite3KvvfsMethods.xRead("local", "jrnl", 0, 0)>0; + }else + if( strcmp(zPath, "session-journal")==0 ){ + *pResOut = sqlite3KvvfsMethods.xRead("session", "jrnl", 0, 0)>0; + }else + if( strcmp(zPath, "local")==0 ){ + *pResOut = sqlite3KvvfsMethods.xRead("local", "sz", 0, 0)>0; + }else + if( strcmp(zPath, "session")==0 ){ + *pResOut = sqlite3KvvfsMethods.xRead("session", "sz", 0, 0)>0; + }else + { + *pResOut = 0; + } + SQLITE_KV_LOG(("xAccess returns %d\n",*pResOut)); + return SQLITE_OK; +} + +/* +** Populate buffer zOut with the full canonical pathname corresponding +** to the pathname in zPath. zOut is guaranteed to point to a buffer +** of at least (INST_MAX_PATHNAME+1) bytes. +*/ +static int kvvfsFullPathname( + sqlite3_vfs *pVfs, + const char *zPath, + int nOut, + char *zOut +){ + size_t nPath; +#ifdef SQLITE_OS_KV_ALWAYS_LOCAL + zPath = "local"; +#endif + nPath = strlen(zPath); + SQLITE_KV_LOG(("xFullPathname(\"%s\")\n", zPath)); + if( nOut +static int kvvfsCurrentTimeInt64(sqlite3_vfs *pVfs, sqlite3_int64 *pTimeOut){ + static const sqlite3_int64 unixEpoch = 24405875*(sqlite3_int64)8640000; + struct timeval sNow; + (void)gettimeofday(&sNow, 0); /* Cannot fail given valid arguments */ + *pTimeOut = unixEpoch + 1000*(sqlite3_int64)sNow.tv_sec + sNow.tv_usec/1000; + return SQLITE_OK; +} +#endif /* SQLITE_OS_KV || SQLITE_OS_UNIX */ + +#if SQLITE_OS_KV +/* +** This routine is called initialize the KV-vfs as the default VFS. 
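+** The second argument to sqlite3_vfs_register() is the makeDflt flag:
+** passing 1 here installs kvvfs as the VFS used when none is named
+** explicitly, whereas the SQLITE_OS_KV_OPTIONAL registration further
+** down passes 0 and leaves the existing default in place.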
+*/ +SQLITE_API int sqlite3_os_init(void){ + return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 1); +} +SQLITE_API int sqlite3_os_end(void){ + return SQLITE_OK; +} +#endif /* SQLITE_OS_KV */ + +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) +SQLITE_PRIVATE int sqlite3KvvfsInit(void){ + return sqlite3_vfs_register(&sqlite3OsKvvfsObject, 0); +} +#endif + +/************** End of os_kv.c ***********************************************/ /************** Begin file os_unix.c *****************************************/ /* ** 2004 May 22 @@ -35602,7 +38193,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ ** This source file is organized into divisions where the logic for various ** subfunctions is contained within the appropriate division. PLEASE ** KEEP THE STRUCTURE OF THIS FILE INTACT. New code should be placed -** in the correct division and should be clearly labeled. +** in the correct division and should be clearly labelled. ** ** The layout of divisions is as follows: ** @@ -35652,7 +38243,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ #endif /* Use pread() and pwrite() if they are available */ -#if defined(__APPLE__) +#if defined(__APPLE__) || defined(__linux__) # define HAVE_PREAD 1 # define HAVE_PWRITE 1 #endif @@ -35667,15 +38258,16 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ /* ** standard include files. */ -#include -#include +#include /* amalgamator: keep */ +#include /* amalgamator: keep */ #include #include -#include +#include /* amalgamator: keep */ /* #include */ -#include +#include /* amalgamator: keep */ #include -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) # include #endif @@ -35763,9 +38355,46 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){ */ #define SQLITE_MAX_SYMLINKS 100 +/* +** Remove and stub certain info for WASI (WebAssembly System +** Interface) builds. +*/ +#ifdef SQLITE_WASI +# undef HAVE_FCHMOD +# undef HAVE_FCHOWN +# undef HAVE_MREMAP +# define HAVE_MREMAP 0 +# ifndef SQLITE_DEFAULT_UNIX_VFS +# define SQLITE_DEFAULT_UNIX_VFS "unix-dotfile" + /* ^^^ should SQLITE_DEFAULT_UNIX_VFS be "unix-none"? */ +# endif +# ifndef F_RDLCK +# define F_RDLCK 0 +# define F_WRLCK 1 +# define F_UNLCK 2 +# if __LONG_MAX == 0x7fffffffL +# define F_GETLK 12 +# define F_SETLK 13 +# define F_SETLKW 14 +# else +# define F_GETLK 5 +# define F_SETLK 6 +# define F_SETLKW 7 +# endif +# endif +#else /* !SQLITE_WASI */ +# ifndef HAVE_FCHMOD +# define HAVE_FCHMOD +# endif +#endif /* SQLITE_WASI */ + +#ifdef SQLITE_WASI +# define osGetpid(X) (pid_t)1 +#else /* Always cast the getpid() return type for compatibility with ** kernel modules in VxWorks. 
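** WASI provides no process model, which is why the SQLITE_WASI branch
** above pins osGetpid() to a constant 1 rather than calling getpid().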
*/ -#define osGetpid(X) (pid_t)getpid() +# define osGetpid(X) (pid_t)getpid() +#endif /* ** Only set the lastErrno if the error code is a real error and not @@ -36037,7 +38666,11 @@ static struct unix_syscall { #define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\ aSyscall[13].pCurrent) +#if defined(HAVE_FCHMOD) { "fchmod", (sqlite3_syscall_ptr)fchmod, 0 }, +#else + { "fchmod", (sqlite3_syscall_ptr)0, 0 }, +#endif #define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent) #if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE @@ -36073,14 +38706,16 @@ static struct unix_syscall { #endif #define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent) -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) { "mmap", (sqlite3_syscall_ptr)mmap, 0 }, #else { "mmap", (sqlite3_syscall_ptr)0, 0 }, #endif #define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[22].pCurrent) -#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0 +#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \ + && !defined(SQLITE_WASI) { "munmap", (sqlite3_syscall_ptr)munmap, 0 }, #else { "munmap", (sqlite3_syscall_ptr)0, 0 }, @@ -36145,7 +38780,7 @@ static int robustFchown(int fd, uid_t uid, gid_t gid){ /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "unix" VFSes. Return SQLITE_OK opon successfully updating the +** "unix" VFSes. Return SQLITE_OK upon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */ @@ -36266,6 +38901,9 @@ static int robust_open(const char *z, int f, mode_t m){ break; } if( fd>=SQLITE_MINIMUM_FILE_DESCRIPTOR ) break; + if( (f & (O_EXCL|O_CREAT))==(O_EXCL|O_CREAT) ){ + (void)osUnlink(z); + } osClose(fd); sqlite3_log(SQLITE_WARNING, "attempt to open \"%s\" as file descriptor %d", z, fd); @@ -36664,7 +39302,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){ ** If you close a file descriptor that points to a file that has locks, ** all locks on that file that are owned by the current process are ** released. To work around this problem, each unixInodeInfo object -** maintains a count of the number of pending locks on tha inode. +** maintains a count of the number of pending locks on the inode. ** When an attempt is made to close an unixFile, if there are ** other unixFile open on the same inode that are holding locks, the call ** to close() the file descriptor is deferred until all of the locks clear. @@ -36678,7 +39316,7 @@ static void vxworksReleaseFileId(struct vxworksFileId *pId){ ** not posix compliant. Under LinuxThreads, a lock created by thread ** A cannot be modified or overridden by a different thread B. ** Only thread A can modify the lock. Locking behavior is correct -** if the appliation uses the newer Native Posix Thread Library (NPTL) +** if the application uses the newer Native Posix Thread Library (NPTL) ** on linux - with NPTL a lock created by thread A can override locks ** in thread B. But there is no way to know at compile-time which ** threading library is being used. So there is no way to know at @@ -36880,7 +39518,7 @@ static void storeLastErrno(unixFile *pFile, int error){ } /* -** Close all file descriptors accumuated in the unixInodeInfo->pUnused list. +** Close all file descriptors accumulated in the unixInodeInfo->pUnused list. 
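** That rule is easy to trip over. A minimal illustration of the
** hazard (a sketch, assuming an ordinary file "db" and <fcntl.h>):
**
**       int fd1 = open("db", O_RDWR);    first handle; fcntl() locks
**                                        are then taken through fd1
**       int fd2 = open("db", O_RDWR);    second handle, same file
**       close(fd2);                      drops fd1's locks as well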
*/ static void closePendingFds(unixFile *pFile){ unixInodeInfo *pInode = pFile->pInode; @@ -37228,7 +39866,7 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){ ** ** UNLOCKED -> SHARED ** SHARED -> RESERVED -** SHARED -> (PENDING) -> EXCLUSIVE +** SHARED -> EXCLUSIVE ** RESERVED -> (PENDING) -> EXCLUSIVE ** PENDING -> EXCLUSIVE ** @@ -37243,7 +39881,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){ ** slightly in order to be compatible with Windows95 systems simultaneously ** accessing the same database file, in case that is ever required. ** - ** Symbols defined in os.h indentify the 'pending byte' and the 'reserved + ** Symbols defined in os.h identify the 'pending byte' and the 'reserved ** byte', each single bytes at well known offsets, and the 'shared byte ** range', a range of 510 bytes at a well known offset. ** @@ -37251,7 +39889,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){ ** byte'. If this is successful, 'shared byte range' is read-locked ** and the lock on the 'pending byte' released. (Legacy note: When ** SQLite was first developed, Windows95 systems were still very common, - ** and Widnows95 lacks a shared-lock capability. So on Windows95, a + ** and Windows95 lacks a shared-lock capability. So on Windows95, a ** single randomly selected by from the 'shared byte range' is locked. ** Windows95 is now pretty much extinct, but this work-around for the ** lack of shared-locks on Windows95 lives on, for backwards @@ -37261,19 +39899,20 @@ static int unixLock(sqlite3_file *id, int eFileLock){ ** A RESERVED lock is implemented by grabbing a write-lock on the ** 'reserved byte'. ** - ** A process may only obtain a PENDING lock after it has obtained a - ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock - ** on the 'pending byte'. This ensures that no new SHARED locks can be - ** obtained, but existing SHARED locks are allowed to persist. A process - ** does not have to obtain a RESERVED lock on the way to a PENDING lock. - ** This property is used by the algorithm for rolling back a journal file - ** after a crash. + ** An EXCLUSIVE lock may only be requested after either a SHARED or + ** RESERVED lock is held. An EXCLUSIVE lock is implemented by obtaining + ** a write-lock on the entire 'shared byte range'. Since all other locks + ** require a read-lock on one of the bytes within this range, this ensures + ** that no other locks are held on the database. ** - ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is - ** implemented by obtaining a write-lock on the entire 'shared byte - ** range'. Since all other locks require a read-lock on one of the bytes - ** within this range, this ensures that no other locks are held on the - ** database. + ** If a process that holds a RESERVED lock requests an EXCLUSIVE, then + ** a PENDING lock is obtained first. A PENDING lock is implemented by + ** obtaining a write-lock on the 'pending byte'. This ensures that no new + ** SHARED locks can be obtained, but existing SHARED locks are allowed to + ** persist. If the call to this function fails to obtain the EXCLUSIVE + ** lock in this case, it holds the PENDING lock instead. The client may + ** then re-attempt the EXCLUSIVE lock later on, after existing SHARED + ** locks have cleared. */ int rc = SQLITE_OK; unixFile *pFile = (unixFile*)id; @@ -37299,7 +39938,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){ /* Make sure the locking sequence is correct. 
** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. + ** (2) SQLite never explicitly requests a pending lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); @@ -37344,7 +39983,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){ lock.l_len = 1L; lock.l_whence = SEEK_SET; if( eFileLock==SHARED_LOCK - || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK) + || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock==RESERVED_LOCK) ){ lock.l_type = (eFileLock==SHARED_LOCK?F_RDLCK:F_WRLCK); lock.l_start = PENDING_BYTE; @@ -37355,6 +39994,9 @@ static int unixLock(sqlite3_file *id, int eFileLock){ storeLastErrno(pFile, tErrno); } goto end_lock; + }else if( eFileLock==EXCLUSIVE_LOCK ){ + pFile->eFileLock = PENDING_LOCK; + pInode->eFileLock = PENDING_LOCK; } } @@ -37442,13 +40084,9 @@ static int unixLock(sqlite3_file *id, int eFileLock){ } #endif - if( rc==SQLITE_OK ){ pFile->eFileLock = eFileLock; pInode->eFileLock = eFileLock; - }else if( eFileLock==EXCLUSIVE_LOCK ){ - pFile->eFileLock = PENDING_LOCK; - pInode->eFileLock = PENDING_LOCK; } end_lock: @@ -38518,7 +41156,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){ /* Make sure the locking sequence is correct ** (1) We never move from unlocked to anything higher than shared lock. - ** (2) SQLite never explicitly requests a pendig lock. + ** (2) SQLite never explicitly requests a pending lock. ** (3) A shared lock is always held when a reserve lock is requested. */ assert( pFile->eFileLock!=NO_LOCK || eFileLock==SHARED_LOCK ); @@ -38634,7 +41272,7 @@ static int afpLock(sqlite3_file *id, int eFileLock){ if( !(failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST + pInode->sharedByte, 1, 0)) ){ int failed2 = SQLITE_OK; - /* now attemmpt to get the exclusive lock range */ + /* now attempt to get the exclusive lock range */ failed = afpSetLock(context->dbPath, pFile, SHARED_FIRST, SHARED_SIZE, 1); if( failed && (failed2 = afpSetLock(context->dbPath, pFile, @@ -38683,9 +41321,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) { unixInodeInfo *pInode; afpLockingContext *context = (afpLockingContext *) pFile->lockingContext; int skipShared = 0; -#ifdef SQLITE_TEST - int h = pFile->h; -#endif assert( pFile ); OSTRACE(("UNLOCK %d %d was %d(%d,%d) pid=%d (afp)\n", pFile->h, eFileLock, @@ -38701,9 +41336,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) { assert( pInode->nShared!=0 ); if( pFile->eFileLock>SHARED_LOCK ){ assert( pInode->eFileLock==pFile->eFileLock ); - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); #ifdef SQLITE_DEBUG /* When reducing a lock such that other processes can start @@ -38752,9 +41384,6 @@ static int afpUnlock(sqlite3_file *id, int eFileLock) { unsigned long long sharedLockByte = SHARED_FIRST+pInode->sharedByte; pInode->nShared--; if( pInode->nShared==0 ){ - SimulateIOErrorBenign(1); - SimulateIOError( h=(-1) ) - SimulateIOErrorBenign(0); if( !skipShared ){ rc = afpSetLock(context->dbPath, pFile, sharedLockByte, 1, 0); } @@ -38855,12 +41484,6 @@ static int nfsUnlock(sqlite3_file *id, int eFileLock){ ** Seek to the offset passed as the second argument, then read cnt ** bytes into pBuf. Return the number of bytes actually read. ** -** NB: If you define USE_PREAD or USE_PREAD64, then it might also -** be necessary to define _XOPEN_SOURCE to be 500. This varies from -** one system to another.
Since SQLite does not define USE_PREAD -** in any form by default, we will not attempt to define _XOPEN_SOURCE. -** See tickets #2741 and #2681. -** ** To avoid stomping the errno value on a failed read the lastErrno value ** is set before returning. */ @@ -38935,7 +41558,7 @@ static int unixRead( #endif #if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering + /* Deal with as much of this read request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ @@ -39087,7 +41710,7 @@ static int unixWrite( #endif #if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering + /* Deal with as much of this write request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ @@ -39209,7 +41832,7 @@ static int full_fsync(int fd, int fullSync, int dataOnly){ /* If we compiled with the SQLITE_NO_SYNC flag, then syncing is a ** no-op. But go ahead and call fstat() to validate the file ** descriptor as we need a method to provoke a failure during - ** coverate testing. + ** coverage testing. */ #ifdef SQLITE_NO_SYNC { @@ -39602,7 +42225,13 @@ static int unixFileControl(sqlite3_file *id, int op, void *pArg){ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT case SQLITE_FCNTL_LOCK_TIMEOUT: { int iOld = pFile->iBusyTimeout; +#if SQLITE_ENABLE_SETLK_TIMEOUT==1 pFile->iBusyTimeout = *(int*)pArg; +#elif SQLITE_ENABLE_SETLK_TIMEOUT==2 + pFile->iBusyTimeout = !!(*(int*)pArg); +#else +# error "SQLITE_ENABLE_SETLK_TIMEOUT must be set to 1 or 2" +#endif *(int*)pArg = iOld; return SQLITE_OK; } @@ -39855,6 +42484,25 @@ static int unixGetpagesize(void){ ** Either unixShmNode.pShmMutex must be held or unixShmNode.nRef==0 and ** unixMutexHeld() is true when reading or writing any other field ** in this structure. +** +** aLock[SQLITE_SHM_NLOCK]: +** This array records the various locks held by clients on each of the +** SQLITE_SHM_NLOCK slots. If the aLock[] entry is set to 0, then no +** locks are held by the process on this slot. If it is set to -1, then +** some client holds an EXCLUSIVE lock on the locking slot. If the aLock[] +** value is set to a positive value, then it is the number of shared +** locks currently held on the slot. +** +** aMutex[SQLITE_SHM_NLOCK]: +** Normally, when SQLITE_ENABLE_SETLK_TIMEOUT is not defined, mutex +** pShmMutex is used to protect the aLock[] array and the right to +** call fcntl() on unixShmNode.hShm to obtain or release locks. +** +** If SQLITE_ENABLE_SETLK_TIMEOUT is defined though, we use an array +** of mutexes - one for each locking slot. To read or write locking +** slot aLock[iSlot], the caller must hold the corresponding mutex +** aMutex[iSlot]. Similarly, to call fcntl() to obtain or release a +** lock corresponding to slot iSlot, mutex aMutex[iSlot] must be held. 
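+**
+** Under SQLITE_ENABLE_SETLK_TIMEOUT the per-slot discipline is, in
+** sketch form (illustrative only, not a routine in this file):
+**
+**       sqlite3_mutex_enter(pShmNode->aMutex[iSlot]);
+**       rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_BASE+iSlot, 1);
+**       if( rc==SQLITE_OK ) pShmNode->aLock[iSlot] = -1;
+**       sqlite3_mutex_leave(pShmNode->aMutex[iSlot]);
+**
+** with -1 recording the exclusive lock in aLock[], per the rules above.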
*/ struct unixShmNode { unixInodeInfo *pInode; /* unixInodeInfo that owns this SHM node */ @@ -39868,10 +42516,11 @@ struct unixShmNode { char **apRegion; /* Array of mapped shared-memory regions */ int nRef; /* Number of unixShm objects pointing to this */ unixShm *pFirst; /* All unixShm objects pointing to this */ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + sqlite3_mutex *aMutex[SQLITE_SHM_NLOCK]; +#endif int aLock[SQLITE_SHM_NLOCK]; /* # shared locks on slot, -1==excl lock */ #ifdef SQLITE_DEBUG - u8 exclMask; /* Mask of exclusive locks held */ - u8 sharedMask; /* Mask of shared locks held */ u8 nextShmId; /* Next available unixShm.id value */ #endif }; @@ -39954,16 +42603,35 @@ static int unixShmSystemLock( struct flock f; /* The posix advisory locking structure */ int rc = SQLITE_OK; /* Result code form fcntl() */ - /* Access to the unixShmNode object is serialized by the caller */ pShmNode = pFile->pInode->pShmNode; - assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) ); - assert( pShmNode->nRef>0 || unixMutexHeld() ); + + /* Assert that the parameters are within expected range and that the + ** correct mutex or mutexes are held. */ + assert( pShmNode->nRef>=0 ); + assert( (ofst==UNIX_SHM_DMS && n==1) + || (ofst>=UNIX_SHM_BASE && ofst+n<=(UNIX_SHM_BASE+SQLITE_SHM_NLOCK)) + ); + if( ofst==UNIX_SHM_DMS ){ + assert( pShmNode->nRef>0 || unixMutexHeld() ); + assert( pShmNode->nRef==0 || sqlite3_mutex_held(pShmNode->pShmMutex) ); + }else{ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int ii; + for(ii=ofst-UNIX_SHM_BASE; ii<ofst-UNIX_SHM_BASE+n; ii++){ + assert( sqlite3_mutex_held(pShmNode->aMutex[ii]) ); + } +#else + assert( sqlite3_mutex_held(pShmNode->pShmMutex) ); + assert( pShmNode->nRef>0 ); +#endif + } /* Shared locks never span more than one byte */ assert( n==1 || lockType!=F_RDLCK ); /* Locks are within range */ assert( n>=1 && n<=SQLITE_SHM_NLOCK ); + assert( ofst>=UNIX_SHM_BASE && ofst<=(UNIX_SHM_DMS+SQLITE_SHM_NLOCK) ); if( pShmNode->hShm>=0 ){ int res; @@ -39974,7 +42642,7 @@ static int unixShmSystemLock( f.l_len = n; res = osSetPosixAdvisoryLock(pShmNode->hShm, &f, pFile); if( res==-1 ){ -#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +#if defined(SQLITE_ENABLE_SETLK_TIMEOUT) && SQLITE_ENABLE_SETLK_TIMEOUT==1 rc = (pFile->iBusyTimeout ? SQLITE_BUSY_TIMEOUT : SQLITE_BUSY); #else rc = SQLITE_BUSY; @@ -39982,39 +42650,28 @@ static int unixShmSystemLock( } } - /* Update the global lock state and do debug tracing */ + /* Do debug tracing */ #ifdef SQLITE_DEBUG - { u16 mask; OSTRACE(("SHM-LOCK ")); - mask = ofst>31 ?
0xffff : (1<<(ofst+n)) - (1<exclMask &= ~mask; - pShmNode->sharedMask &= ~mask; + OSTRACE(("unlock %d..%d ok\n", ofst, ofst+n-1)); }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock %d ok", ofst)); - pShmNode->exclMask &= ~mask; - pShmNode->sharedMask |= mask; + OSTRACE(("read-lock %d..%d ok\n", ofst, ofst+n-1)); }else{ assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d ok", ofst)); - pShmNode->exclMask |= mask; - pShmNode->sharedMask &= ~mask; + OSTRACE(("write-lock %d..%d ok\n", ofst, ofst+n-1)); } }else{ if( lockType==F_UNLCK ){ - OSTRACE(("unlock %d failed", ofst)); + OSTRACE(("unlock %d..%d failed\n", ofst, ofst+n-1)); }else if( lockType==F_RDLCK ){ - OSTRACE(("read-lock failed")); + OSTRACE(("read-lock %d..%d failed\n", ofst, ofst+n-1)); }else{ assert( lockType==F_WRLCK ); - OSTRACE(("write-lock %d failed", ofst)); + OSTRACE(("write-lock %d..%d failed\n", ofst, ofst+n-1)); } } - OSTRACE((" - afterwards %03x,%03x\n", - pShmNode->sharedMask, pShmNode->exclMask)); - } #endif return rc; @@ -40051,6 +42708,11 @@ static void unixShmPurge(unixFile *pFd){ int i; assert( p->pInode==pFd->pInode ); sqlite3_mutex_free(p->pShmMutex); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + for(i=0; iaMutex[i]); + } +#endif for(i=0; inRegion; i+=nShmPerMap){ if( p->hShm>=0 ){ osMunmap(p->apRegion[i], p->szRegion); @@ -40110,7 +42772,20 @@ static int unixLockSharedMemory(unixFile *pDbFd, unixShmNode *pShmNode){ pShmNode->isUnlocked = 1; rc = SQLITE_READONLY_CANTINIT; }else{ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + /* Do not use a blocking lock here. If the lock cannot be obtained + ** immediately, it means some other connection is truncating the + ** *-shm file. And after it has done so, it will not release its + ** lock, but only downgrade it to a shared lock. So no point in + ** blocking here. The call below to obtain the shared DMS lock may + ** use a blocking lock. */ + int iSaveTimeout = pDbFd->iBusyTimeout; + pDbFd->iBusyTimeout = 0; +#endif rc = unixShmSystemLock(pDbFd, F_WRLCK, UNIX_SHM_DMS, 1); +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + pDbFd->iBusyTimeout = iSaveTimeout; +#endif /* The first connection to attach must truncate the -shm file. We ** truncate to 3 bytes (an arbitrary small number, less than the ** -shm header size) rather than 0 as a system debugging aid, to @@ -40231,6 +42906,18 @@ static int unixOpenSharedMemory(unixFile *pDbFd){ rc = SQLITE_NOMEM_BKPT; goto shm_open_err; } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + { + int ii; + for(ii=0; iiaMutex[ii] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + if( pShmNode->aMutex[ii]==0 ){ + rc = SQLITE_NOMEM_BKPT; + goto shm_open_err; + } + } + } +#endif } if( pInode->bProcessLock==0 ){ @@ -40452,9 +43139,11 @@ static int unixShmMap( */ #ifdef SQLITE_DEBUG static int assertLockingArrayOk(unixShmNode *pShmNode){ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + return 1; +#else unixShm *pX; int aLock[SQLITE_SHM_NLOCK]; - assert( sqlite3_mutex_held(pShmNode->pShmMutex) ); memset(aLock, 0, sizeof(aLock)); for(pX=pShmNode->pFirst; pX; pX=pX->pNext){ @@ -40472,13 +43161,14 @@ static int assertLockingArrayOk(unixShmNode *pShmNode){ assert( 0==memcmp(pShmNode->aLock, aLock, sizeof(aLock)) ); return (memcmp(pShmNode->aLock, aLock, sizeof(aLock))==0); +#endif } #endif /* ** Change the lock state for a shared-memory segment. ** -** Note that the relationship between SHAREd and EXCLUSIVE locks is a little +** Note that the relationship between SHARED and EXCLUSIVE locks is a little ** different here than in posix. 
In xShmLock(), one can go from unlocked ** to shared and back or from unlocked to exclusive and back. But one may ** not go from shared to exclusive or from exclusive to shared. @@ -40493,7 +43183,7 @@ static int unixShmLock( unixShm *p; /* The shared memory being locked */ unixShmNode *pShmNode; /* The underlying file iNode */ int rc = SQLITE_OK; /* Result code */ - u16 mask; /* Mask of locks to take or release */ + u16 mask = (1<<(ofst+n)) - (1<pShm; @@ -40528,88 +43218,151 @@ static int unixShmLock( ** It is not permitted to block on the RECOVER lock. */ #ifdef SQLITE_ENABLE_SETLK_TIMEOUT - assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( - (ofst!=2) /* not RECOVER */ - && (ofst!=1 || (p->exclMask|p->sharedMask)==0) - && (ofst!=0 || (p->exclMask|p->sharedMask)<3) - && (ofst<3 || (p->exclMask|p->sharedMask)<(1<exclMask|p->sharedMask); + assert( (flags & SQLITE_SHM_UNLOCK) || pDbFd->iBusyTimeout==0 || ( + (ofst!=2) /* not RECOVER */ + && (ofst!=1 || lockMask==0 || lockMask==2) + && (ofst!=0 || lockMask<3) + && (ofst<3 || lockMask<(1<1 || mask==(1<pShmMutex); - assert( assertLockingArrayOk(pShmNode) ); - if( flags & SQLITE_SHM_UNLOCK ){ - if( (p->exclMask|p->sharedMask) & mask ){ - int ii; - int bUnlock = 1; + /* Check if there is any work to do. There are three cases: + ** + ** a) An unlock operation where there are locks to unlock, + ** b) An shared lock where the requested lock is not already held + ** c) An exclusive lock where the requested lock is not already held + ** + ** The SQLite core never requests an exclusive lock that it already holds. + ** This is assert()ed below. + */ + assert( flags!=(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK) + || 0==(p->exclMask & mask) + ); + if( ((flags & SQLITE_SHM_UNLOCK) && ((p->exclMask|p->sharedMask) & mask)) + || (flags==(SQLITE_SHM_SHARED|SQLITE_SHM_LOCK) && 0==(p->sharedMask & mask)) + || (flags==(SQLITE_SHM_EXCLUSIVE|SQLITE_SHM_LOCK)) + ){ - for(ii=ofst; ii((p->sharedMask & (1<aMutex[iMutex]); + if( rc!=SQLITE_OK ) goto leave_shmnode_mutexes; + }else{ + sqlite3_mutex_enter(pShmNode->aMutex[iMutex]); } + } +#else + sqlite3_mutex_enter(pShmNode->pShmMutex); +#endif - if( bUnlock ){ - rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n); - if( rc==SQLITE_OK ){ - memset(&aLock[ofst], 0, sizeof(int)*n); + if( ALWAYS(rc==SQLITE_OK) ){ + if( flags & SQLITE_SHM_UNLOCK ){ + /* Case (a) - unlock. */ + int bUnlock = 1; + assert( (p->exclMask & p->sharedMask)==0 ); + assert( !(flags & SQLITE_SHM_EXCLUSIVE) || (p->exclMask & mask)==mask ); + assert( !(flags & SQLITE_SHM_SHARED) || (p->sharedMask & mask)==mask ); + + /* If this is a SHARED lock being unlocked, it is possible that other + ** clients within this process are holding the same SHARED lock. In + ** this case, set bUnlock to 0 so that the posix lock is not removed + ** from the file-descriptor below. 
*/ + if( flags & SQLITE_SHM_SHARED ){ + assert( n==1 ); + assert( aLock[ofst]>=1 ); + if( aLock[ofst]>1 ){ + bUnlock = 0; + aLock[ofst]--; + p->sharedMask &= ~mask; + } } - }else if( ALWAYS(p->sharedMask & (1<1 ); - aLock[ofst]--; - } - /* Undo the local locks */ - if( rc==SQLITE_OK ){ - p->exclMask &= ~mask; - p->sharedMask &= ~mask; - } - } - }else if( flags & SQLITE_SHM_SHARED ){ - assert( n==1 ); - assert( (p->exclMask & (1<sharedMask & mask)==0 ){ - if( aLock[ofst]<0 ){ - rc = SQLITE_BUSY; - }else if( aLock[ofst]==0 ){ - rc = unixShmSystemLock(pDbFd, F_RDLCK, ofst+UNIX_SHM_BASE, n); - } + if( bUnlock ){ + rc = unixShmSystemLock(pDbFd, F_UNLCK, ofst+UNIX_SHM_BASE, n); + if( rc==SQLITE_OK ){ + memset(&aLock[ofst], 0, sizeof(int)*n); + p->sharedMask &= ~mask; + p->exclMask &= ~mask; + } + } + }else if( flags & SQLITE_SHM_SHARED ){ + /* Case (b) - a shared lock. */ - /* Get the local shared locks */ - if( rc==SQLITE_OK ){ - p->sharedMask |= mask; - aLock[ofst]++; - } - } - }else{ - /* Make sure no sibling connections hold locks that will block this - ** lock. If any do, return SQLITE_BUSY right away. */ - int ii; - for(ii=ofst; iisharedMask & mask)==0 ); - if( ALWAYS((p->exclMask & (1<sharedMask |= mask; + aLock[ofst]++; + } + }else{ + /* Case (c) - an exclusive lock. */ + int ii; + + assert( flags==(SQLITE_SHM_LOCK|SQLITE_SHM_EXCLUSIVE) ); assert( (p->sharedMask & mask)==0 ); - p->exclMask |= mask; + assert( (p->exclMask & mask)==0 ); + + /* Make sure no sibling connections hold locks that will block this + ** lock. If any do, return SQLITE_BUSY right away. */ for(ii=ofst; iiexclMask |= mask; + for(ii=ofst; ii=ofst; iMutex--){ + sqlite3_mutex_leave(pShmNode->aMutex[iMutex]); } +#else + sqlite3_mutex_leave(pShmNode->pShmMutex); +#endif } - assert( assertLockingArrayOk(pShmNode) ); - sqlite3_mutex_leave(pShmNode->pShmMutex); + OSTRACE(("SHM-LOCK shmid-%d, pid-%d got %03x,%03x\n", p->id, osGetpid(0), p->sharedMask, p->exclMask)); return rc; @@ -40859,11 +43612,16 @@ static int unixFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ + /* Ensure that there is always at least a 256 byte buffer of addressable + ** memory following the returned page. If the database is corrupt, + ** SQLite may overread the page slightly (in practice only a few bytes, + ** but 256 is safe, round, number). */ + const int nEofBuffer = 256; if( pFd->pMapRegion==0 ){ int rc = unixMapfile(pFd, -1); if( rc!=SQLITE_OK ) return rc; } - if( pFd->mmapSize >= iOff+nAmt ){ + if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){ *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; } @@ -42039,12 +44797,10 @@ static void appendOnePathElement( if( zName[0]=='.' ){ if( nName==1 ) return; if( zName[1]=='.' && nName==2 ){ - if( pPath->nUsed<=1 ){ - pPath->rc = SQLITE_ERROR; - return; + if( pPath->nUsed>1 ){ + assert( pPath->zOut[0]=='/' ); + while( pPath->zOut[--pPath->nUsed]!='/' ){} } - assert( pPath->zOut[0]=='/' ); - while( pPath->zOut[--pPath->nUsed]!='/' ){} return; } } @@ -42256,12 +45012,17 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){ ** than the argument. */ static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){ -#if OS_VXWORKS +#if !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP+0 struct timespec sp; - sp.tv_sec = microseconds / 1000000; sp.tv_nsec = (microseconds % 1000000) * 1000; + + /* Almost all modern unix systems support nanosleep(). 
But if you are + ** compiling for one of the rare exceptions, you can use + ** -DHAVE_NANOSLEEP=0 (perhaps in conjuction with -DHAVE_USLEEP if + ** usleep() is available) in order to bypass the use of nanosleep() */ nanosleep(&sp, NULL); + UNUSED_PARAMETER(NotUsed); return microseconds; #elif defined(HAVE_USLEEP) && HAVE_USLEEP @@ -43638,8 +46399,16 @@ SQLITE_API int sqlite3_os_init(void){ /* Register all VFSes defined in the aVfs[] array */ for(i=0; i<(sizeof(aVfs)/sizeof(sqlite3_vfs)); i++){ +#ifdef SQLITE_DEFAULT_UNIX_VFS + sqlite3_vfs_register(&aVfs[i], + 0==strcmp(aVfs[i].zName,SQLITE_DEFAULT_UNIX_VFS)); +#else sqlite3_vfs_register(&aVfs[i], i==0); +#endif } +#ifdef SQLITE_OS_KV_OPTIONAL + sqlite3KvvfsInit(); +#endif unixBigLock = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); #ifndef SQLITE_OMIT_WAL @@ -44843,7 +47612,7 @@ static struct win_syscall { /* ** This is the xSetSystemCall() method of sqlite3_vfs for all of the -** "win32" VFSes. Return SQLITE_OK opon successfully updating the +** "win32" VFSes. Return SQLITE_OK upon successfully updating the ** system call pointer, or SQLITE_NOTFOUND if there is no configurable ** system call named zName. */ @@ -46423,7 +49192,7 @@ static int winRead( pFile->h, pBuf, amt, offset, pFile->locktype)); #if SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this read request as possible by transfering + /* Deal with as much of this read request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ @@ -46501,7 +49270,7 @@ static int winWrite( pFile->h, pBuf, amt, offset, pFile->locktype)); #if defined(SQLITE_MMAP_READWRITE) && SQLITE_MAX_MMAP_SIZE>0 - /* Deal with as much of this write request as possible by transfering + /* Deal with as much of this write request as possible by transferring ** data from the memory mapping using memcpy(). */ if( offsetmmapSize ){ if( offset+amt <= pFile->mmapSize ){ @@ -46611,7 +49380,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ ** all references to memory-mapped content are closed. That is doable, ** but involves adding a few branches in the common write code path which ** could slow down normal operations slightly. Hence, we have decided for - ** now to simply make trancations a no-op if there are pending reads. We + ** now to simply make transactions a no-op if there are pending reads. We ** can maybe revisit this decision in the future. */ return SQLITE_OK; @@ -46670,7 +49439,7 @@ static int winTruncate(sqlite3_file *id, sqlite3_int64 nByte){ #ifdef SQLITE_TEST /* ** Count the number of fullsyncs and normal syncs. This is used to test -** that syncs and fullsyncs are occuring at the right times. +** that syncs and fullsyncs are occurring at the right times. */ SQLITE_API int sqlite3_sync_count = 0; SQLITE_API int sqlite3_fullsync_count = 0; @@ -47027,7 +49796,7 @@ static int winLock(sqlite3_file *id, int locktype){ */ if( locktype==EXCLUSIVE_LOCK && res ){ assert( pFile->locktype>=SHARED_LOCK ); - res = winUnlockReadLock(pFile); + (void)winUnlockReadLock(pFile); res = winLockFile(&pFile->h, SQLITE_LOCKFILE_FLAGS, SHARED_FIRST, 0, SHARED_SIZE, 0); if( res ){ @@ -48205,6 +50974,11 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ #if SQLITE_MAX_MMAP_SIZE>0 if( pFd->mmapSizeMax>0 ){ + /* Ensure that there is always at least a 256 byte buffer of addressable + ** memory following the returned page. 
If the database is corrupt, + ** SQLite may overread the page slightly (in practice only a few bytes, + ** but 256 is safe, round, number). */ + const int nEofBuffer = 256; if( pFd->pMapRegion==0 ){ int rc = winMapfile(pFd, -1); if( rc!=SQLITE_OK ){ @@ -48213,7 +50987,7 @@ static int winFetch(sqlite3_file *fd, i64 iOff, int nAmt, void **pp){ return rc; } } - if( pFd->mmapSize >= iOff+nAmt ){ + if( pFd->mmapSize >= (iOff+nAmt+nEofBuffer) ){ assert( pFd->pMapRegion!=0 ); *pp = &((u8 *)pFd->pMapRegion)[iOff]; pFd->nFetchOut++; @@ -48409,9 +51183,10 @@ static int winMakeEndInDirSep(int nBuf, char *zBuf){ } /* -** If sqlite3_temp_directory is not, take the mutex and return true. +** If sqlite3_temp_directory is defined, take the mutex and return true. ** -** If sqlite3_temp_directory is NULL, omit the mutex and return false. +** If sqlite3_temp_directory is NULL (undefined), omit the mutex and +** return false. */ static int winTempDirDefined(void){ sqlite3_mutex_enter(sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR)); @@ -48430,6 +51205,7 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789"; size_t i, j; + DWORD pid; int nPre = sqlite3Strlen30(SQLITE_TEMP_FILE_PREFIX); int nMax, nBuf, nDir, nLen; char *zBuf; @@ -48642,7 +51418,10 @@ static int winGetTempname(sqlite3_vfs *pVfs, char **pzBuf){ j = sqlite3Strlen30(zBuf); sqlite3_randomness(15, &zBuf[j]); + pid = osGetCurrentProcessId(); for(i=0; i<15; i++, j++){ + zBuf[j] += pid & 0xff; + pid >>= 8; zBuf[j] = (char)zChars[ ((unsigned char)zBuf[j])%(sizeof(zChars)-1) ]; } zBuf[j] = 0; @@ -48880,7 +51659,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -48897,7 +51676,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -48917,7 +51696,7 @@ static int winOpen( if( isReadWrite ){ int rc2, isRO = 0; sqlite3BeginBenignMalloc(); - rc2 = winAccess(pVfs, zName, SQLITE_ACCESS_READ, &isRO); + rc2 = winAccess(pVfs, zUtf8Name, SQLITE_ACCESS_READ, &isRO); sqlite3EndBenignMalloc(); if( rc2==SQLITE_OK && isRO ) break; } @@ -49140,6 +51919,13 @@ static int winAccess( OSTRACE(("ACCESS name=%s, flags=%x, pResOut=%p\n", zFilename, flags, pResOut)); + if( zFilename==0 ){ + *pResOut = 0; + OSTRACE(("ACCESS name=%s, pResOut=%p, *pResOut=%d, rc=SQLITE_OK\n", + zFilename, pResOut, *pResOut)); + return SQLITE_OK; + } + zConverted = winConvertFromUtf8Filename(zFilename); if( zConverted==0 ){ OSTRACE(("ACCESS name=%s, rc=SQLITE_IOERR_NOMEM\n", zFilename)); @@ -49447,7 +52233,8 @@ static int winFullPathname( char *zFull /* Output buffer */ ){ int rc; - sqlite3_mutex *pMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR); + MUTEX_LOGIC( sqlite3_mutex *pMutex; ) + MUTEX_LOGIC( pMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_TEMPDIR); ) sqlite3_mutex_enter(pMutex); rc = winFullPathnameNoMutex(pVfs, zRelative, nFull, zFull); sqlite3_mutex_leave(pMutex); @@ -49989,6 +52776,7 @@ static int memdbTruncate(sqlite3_file*, sqlite3_int64 size); static int memdbSync(sqlite3_file*, int flags); static int memdbFileSize(sqlite3_file*, sqlite3_int64 *pSize); static int 
memdbLock(sqlite3_file*, int); +static int memdbUnlock(sqlite3_file*, int); /* static int memdbCheckReservedLock(sqlite3_file*, int *pResOut);// not used */ static int memdbFileControl(sqlite3_file*, int op, void *pArg); /* static int memdbSectorSize(sqlite3_file*); // not used */ @@ -50047,7 +52835,7 @@ static const sqlite3_io_methods memdb_io_methods = { memdbSync, /* xSync */ memdbFileSize, /* xFileSize */ memdbLock, /* xLock */ - memdbLock, /* xUnlock - same as xLock in this case */ + memdbUnlock, /* xUnlock */ 0, /* memdbCheckReservedLock, */ /* xCheckReservedLock */ memdbFileControl, /* xFileControl */ 0, /* memdbSectorSize,*/ /* xSectorSize */ @@ -50248,39 +53036,81 @@ static int memdbLock(sqlite3_file *pFile, int eLock){ MemFile *pThis = (MemFile*)pFile; MemStore *p = pThis->pStore; int rc = SQLITE_OK; - if( eLock==pThis->eLock ) return SQLITE_OK; + if( eLock<=pThis->eLock ) return SQLITE_OK; memdbEnter(p); - if( eLock>SQLITE_LOCK_SHARED ){ - if( p->mFlags & SQLITE_DESERIALIZE_READONLY ){ - rc = SQLITE_READONLY; - }else if( pThis->eLock<=SQLITE_LOCK_SHARED ){ - if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nWrLock = 1; + + assert( p->nWrLock==0 || p->nWrLock==1 ); + assert( pThis->eLock<=SQLITE_LOCK_SHARED || p->nWrLock==1 ); + assert( pThis->eLock==SQLITE_LOCK_NONE || p->nRdLock>=1 ); + + if( eLock>SQLITE_LOCK_SHARED && (p->mFlags & SQLITE_DESERIALIZE_READONLY) ){ + rc = SQLITE_READONLY; + }else{ + switch( eLock ){ + case SQLITE_LOCK_SHARED: { + assert( pThis->eLock==SQLITE_LOCK_NONE ); + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nRdLock++; + } + break; + }; + + case SQLITE_LOCK_RESERVED: + case SQLITE_LOCK_PENDING: { + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( ALWAYS(pThis->eLock==SQLITE_LOCK_SHARED) ){ + if( p->nWrLock>0 ){ + rc = SQLITE_BUSY; + }else{ + p->nWrLock = 1; + } + } + break; + } + + default: { + assert( eLock==SQLITE_LOCK_EXCLUSIVE ); + assert( pThis->eLock>=SQLITE_LOCK_SHARED ); + if( p->nRdLock>1 ){ + rc = SQLITE_BUSY; + }else if( pThis->eLock==SQLITE_LOCK_SHARED ){ + p->nWrLock = 1; + } + break; } } - }else if( eLock==SQLITE_LOCK_SHARED ){ - if( pThis->eLock > SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; - }else if( p->nWrLock ){ - rc = SQLITE_BUSY; - }else{ - p->nRdLock++; + } + if( rc==SQLITE_OK ) pThis->eLock = eLock; + memdbLeave(p); + return rc; +} + +/* +** Unlock an memdb-file. 
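+**
+** Inferred behavior (a summary of the code below, not upstream text):
+** dropping from EXCLUSIVE, PENDING or RESERVED down to SHARED releases
+** the single write lock (nWrLock--) while the read lock stays counted
+** in nRdLock; dropping to NONE releases the write lock if held and
+** then gives up the read lock as well. Unlocking cannot fail, so the
+** routine always returns SQLITE_OK.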
+*/ +static int memdbUnlock(sqlite3_file *pFile, int eLock){ + MemFile *pThis = (MemFile*)pFile; + MemStore *p = pThis->pStore; + if( eLock>=pThis->eLock ) return SQLITE_OK; + memdbEnter(p); + + assert( eLock==SQLITE_LOCK_SHARED || eLock==SQLITE_LOCK_NONE ); + if( eLock==SQLITE_LOCK_SHARED ){ + if( ALWAYS(pThis->eLock>SQLITE_LOCK_SHARED) ){ + p->nWrLock--; } }else{ - assert( eLock==SQLITE_LOCK_NONE ); if( pThis->eLock>SQLITE_LOCK_SHARED ){ - assert( p->nWrLock==1 ); - p->nWrLock = 0; + p->nWrLock--; } - assert( p->nRdLock>0 ); p->nRdLock--; } - if( rc==SQLITE_OK ) pThis->eLock = eLock; + + pThis->eLock = eLock; memdbLeave(p); - return rc; + return SQLITE_OK; } #if 0 @@ -50390,7 +53220,7 @@ static int memdbOpen( memset(pFile, 0, sizeof(*pFile)); szName = sqlite3Strlen30(zName); - if( szName>1 && zName[0]=='/' ){ + if( szName>1 && (zName[0]=='/' || zName[0]=='\\') ){ int i; #ifndef SQLITE_MUTEX_OMIT sqlite3_mutex *pVfsMutex = sqlite3MutexAlloc(SQLITE_MUTEX_STATIC_VFS1); @@ -50636,6 +53466,14 @@ SQLITE_API unsigned char *sqlite3_serialize( pOut = 0; }else{ sz = sqlite3_column_int64(pStmt, 0)*szPage; + if( sz==0 ){ + sqlite3_reset(pStmt); + sqlite3_exec(db, "BEGIN IMMEDIATE; COMMIT;", 0, 0, 0); + rc = sqlite3_step(pStmt); + if( rc==SQLITE_ROW ){ + sz = sqlite3_column_int64(pStmt, 0)*szPage; + } + } if( piSize ) *piSize = sz; if( mFlags & SQLITE_SERIALIZE_NOCOPY ){ pOut = 0; @@ -50737,6 +53575,13 @@ SQLITE_API int sqlite3_deserialize( return rc; } +/* +** Return true if the VFS is the memvfs. +*/ +SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs *pVfs){ + return pVfs==&memdb_vfs; +} + /* ** This routine is called when the extension is loaded. ** Register the new VFS. @@ -50949,7 +53794,7 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec *p, u32 i){ h = BITVEC_HASH(i++); /* if there wasn't a hash collision, and this doesn't */ /* completely fill the hash, then just add it without */ - /* worring about sub-dividing and re-hashing. */ + /* worrying about sub-dividing and re-hashing. 
*/ if( !p->u.aHash[h] ){ if (p->nSet<(BITVEC_NINT-1)) { goto bitvec_set_end; @@ -51216,7 +54061,7 @@ SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int sz, int *aOp){ struct PCache { PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */ PgHdr *pSynced; /* Last synced page in dirty page list */ - int nRefSum; /* Sum of ref counts over all pages */ + i64 nRefSum; /* Sum of ref counts over all pages */ int szCache; /* Configured cache size */ int szSpill; /* Size before spilling occurs */ int szPage; /* Size of every page in this cache */ @@ -51241,12 +54086,24 @@ struct PCache { int sqlite3PcacheTrace = 2; /* 0: off 1: simple 2: cache dumps */ int sqlite3PcacheMxDump = 9999; /* Max cache entries for pcacheDump() */ # define pcacheTrace(X) if(sqlite3PcacheTrace){sqlite3DebugPrintf X;} - void pcacheDump(PCache *pCache){ - int N; - int i, j; - sqlite3_pcache_page *pLower; + static void pcachePageTrace(int i, sqlite3_pcache_page *pLower){ PgHdr *pPg; unsigned char *a; + int j; + if( pLower==0 ){ + printf("%3d: NULL\n", i); + }else{ + pPg = (PgHdr*)pLower->pExtra; + printf("%3d: nRef %2lld flgs %02x data ", i, pPg->nRef, pPg->flags); + a = (unsigned char *)pLower->pBuf; + for(j=0; j<12; j++) printf("%02x", a[j]); + printf(" ptr %p\n", pPg); + } + } + static void pcacheDump(PCache *pCache){ + int N; + int i; + sqlite3_pcache_page *pLower; if( sqlite3PcacheTrace<2 ) return; if( pCache->pCache==0 ) return; @@ -51254,22 +54111,42 @@ struct PCache { if( N>sqlite3PcacheMxDump ) N = sqlite3PcacheMxDump; for(i=1; i<=N; i++){ pLower = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, i, 0); - if( pLower==0 ) continue; - pPg = (PgHdr*)pLower->pExtra; - printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags); - a = (unsigned char *)pLower->pBuf; - for(j=0; j<12; j++) printf("%02x", a[j]); - printf("\n"); - if( pPg->pPage==0 ){ + pcachePageTrace(i, pLower); + if( pLower && ((PgHdr*)pLower)->pPage==0 ){ sqlite3GlobalConfig.pcache2.xUnpin(pCache->pCache, pLower, 0); } } } - #else +#else # define pcacheTrace(X) +# define pcachePageTrace(PGNO, X) # define pcacheDump(X) #endif +/* +** Return 1 if pPg is on the dirty list for pCache. Return 0 if not. +** This routine runs inside of assert() statements only. +*/ +#if defined(SQLITE_ENABLE_EXPENSIVE_ASSERT) +static int pageOnDirtyList(PCache *pCache, PgHdr *pPg){ + PgHdr *p; + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + if( p==pPg ) return 1; + } + return 0; +} +static int pageNotOnDirtyList(PCache *pCache, PgHdr *pPg){ + PgHdr *p; + for(p=pCache->pDirty; p; p=p->pDirtyNext){ + if( p==pPg ) return 0; + } + return 1; +} +#else +# define pageOnDirtyList(A,B) 1 +# define pageNotOnDirtyList(A,B) 1 +#endif + /* ** Check invariants on a PgHdr entry. Return true if everything is OK. ** Return false if any invariant is violated. 
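The two list walks added above are O(n) and are compiled in only under SQLITE_ENABLE_EXPENSIVE_ASSERT; in other builds the macros collapse to the constant 1 so the assert()s vanish. A minimal self-contained sketch of the invariant they guard, using a hypothetical Node type rather than SQLite's PgHdr:

#include <assert.h>

/* Hypothetical doubly-linked dirty list mirroring PgHdr.pDirtyNext and
** pDirtyPrev. Illustrative only; not SQLite code. */
typedef struct Node Node;
struct Node { Node *pNext, *pPrev; int isDirty; };

/* Return 1 if p is on the list headed by pHead (cf. pageOnDirtyList). */
static int onList(Node *pHead, Node *p){
  Node *q;
  for(q=pHead; q; q=q->pNext){
    if( q==p ) return 1;
  }
  return 0;
}

/* The structural invariant asserted for every dirty page: back-links
** must agree with forward links, and only the list head lacks a
** predecessor. */
static void checkNode(Node *pHead, Node *p){
  assert( p->isDirty );
  assert( p->pNext==0 || p->pNext->pPrev==p );
  assert( p->pPrev==0 || p->pPrev->pNext==p );
  assert( p->pPrev!=0 || pHead==p );
  assert( onList(pHead, p) );
}

int main(void){
  Node a = {0, 0, 1}, b = {0, 0, 1};
  a.pNext = &b;  b.pPrev = &a;      /* dirty list: a -> b */
  checkNode(&a, &a);
  checkNode(&a, &b);
  return 0;
}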
@@ -51288,8 +54165,13 @@ SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){ assert( pCache!=0 ); /* Every page has an associated PCache */ if( pPg->flags & PGHDR_CLEAN ){ assert( (pPg->flags & PGHDR_DIRTY)==0 );/* Cannot be both CLEAN and DIRTY */ - assert( pCache->pDirty!=pPg ); /* CLEAN pages not on dirty list */ - assert( pCache->pDirtyTail!=pPg ); + assert( pageNotOnDirtyList(pCache, pPg) );/* CLEAN pages not on dirtylist */ + }else{ + assert( (pPg->flags & PGHDR_DIRTY)!=0 );/* If not CLEAN must be DIRTY */ + assert( pPg->pDirtyNext==0 || pPg->pDirtyNext->pDirtyPrev==pPg ); + assert( pPg->pDirtyPrev==0 || pPg->pDirtyPrev->pDirtyNext==pPg ); + assert( pPg->pDirtyPrev!=0 || pCache->pDirty==pPg ); + assert( pageOnDirtyList(pCache, pPg) ); } /* WRITEABLE pages must also be DIRTY */ if( pPg->flags & PGHDR_WRITEABLE ){ @@ -51419,7 +54301,7 @@ static int numberOfCachePages(PCache *p){ return p->szCache; }else{ i64 n; - /* IMPLEMANTATION-OF: R-59858-46238 If the argument N is negative, then the + /* IMPLEMENTATION-OF: R-59858-46238 If the argument N is negative, then the ** number of cache pages is adjusted to be a number of pages that would ** use approximately abs(N*1024) bytes of memory based on the current ** page size. */ @@ -51563,8 +54445,9 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch( assert( createFlag==0 || pCache->eCreate==eCreate ); assert( createFlag==0 || eCreate==1+(!pCache->bPurgeable||!pCache->pDirty) ); pRes = sqlite3GlobalConfig.pcache2.xFetch(pCache->pCache, pgno, eCreate); - pcacheTrace(("%p.FETCH %d%s (result: %p)\n",pCache,pgno, + pcacheTrace(("%p.FETCH %d%s (result: %p) ",pCache,pgno, createFlag?" create":"",pRes)); + pcachePageTrace(pgno, pRes); return pRes; } @@ -51692,6 +54575,7 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3PcacheRelease(PgHdr *p){ pcacheUnpin(p); }else{ pcacheManageDirtyList(p, PCACHE_DIRTYLIST_FRONT); + assert( sqlite3PcachePageSanity(p) ); } } } @@ -51735,6 +54619,7 @@ SQLITE_PRIVATE void sqlite3PcacheMakeDirty(PgHdr *p){ pcacheTrace(("%p.DIRTY %d\n",p->pCache,p->pgno)); assert( (p->flags & (PGHDR_DIRTY|PGHDR_CLEAN))==PGHDR_DIRTY ); pcacheManageDirtyList(p, PCACHE_DIRTYLIST_ADD); + assert( sqlite3PcachePageSanity(p) ); } assert( sqlite3PcachePageSanity(p) ); } @@ -51904,7 +54789,7 @@ static PgHdr *pcacheMergeDirtyList(PgHdr *pA, PgHdr *pB){ } /* -** Sort the list of pages in accending order by pgno. Pages are +** Sort the list of pages in ascending order by pgno. Pages are ** connected by pDirty pointers. The pDirtyPrev pointers are ** corrupted by this sort. ** @@ -51963,14 +54848,14 @@ SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache *pCache){ ** This is not the total number of pages referenced, but the sum of the ** reference count for all pages. */ -SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){ +SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache *pCache){ return pCache->nRefSum; } /* ** Return the number of references to the page supplied as an argument. */ -SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){ +SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr *p){ return p->nRef; } @@ -52112,12 +54997,13 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd ** size can vary according to architecture, compile-time options, and ** SQLite library version number. ** -** If SQLITE_PCACHE_SEPARATE_HEADER is defined, then the extension is obtained -** using a separate memory allocation from the database page content. 
This -** seeks to overcome the "clownshoe" problem (also called "internal -** fragmentation" in academic literature) of allocating a few bytes more -** than a power of two with the memory allocator rounding up to the next -** power of two, and leaving the rounded-up space unused. +** Historical note: It used to be that if the SQLITE_PCACHE_SEPARATE_HEADER +** was defined, then the page content would be held in a separate memory +** allocation from the PgHdr1. This was intended to avoid clownshoe memory +** allocations. However, the btree layer needs a small (16-byte) overrun +** area after the page content buffer. The header serves as that overrun +** area. Therefore SQLITE_PCACHE_SEPARATE_HEADER was discontinued to avoid +** any possibility of a memory error. ** ** This module tracks pointers to PgHdr1 objects. Only pcache.c communicates ** with this module. Information is passed back and forth as PgHdr1 pointers. @@ -52143,7 +55029,7 @@ SQLITE_PRIVATE void sqlite3PcacheIterateDirty(PCache *pCache, void (*xIter)(PgHd ** If N is positive, then N pages worth of memory are allocated using a single ** sqlite3Malloc() call and that memory is used for the first N pages allocated. ** Or if N is negative, then -1024*N bytes of memory are allocated and used -** for as many pages as can be accomodated. +** for as many pages as can be accommodated. ** ** Only one of (2) or (3) can be used. Once the memory available to (2) or ** (3) is exhausted, subsequent allocations fail over to the general-purpose @@ -52162,30 +55048,40 @@ typedef struct PGroup PGroup; /* ** Each cache entry is represented by an instance of the following -** structure. Unless SQLITE_PCACHE_SEPARATE_HEADER is defined, a buffer of -** PgHdr1.pCache->szPage bytes is allocated directly before this structure -** in memory. +** structure. A buffer of PgHdr1.pCache->szPage bytes is allocated +** directly before this structure and is used to cache the page content. ** -** Note: Variables isBulkLocal and isAnchor were once type "u8". That works, +** When reading a corrupt database file, it is possible that SQLite might +** read a few bytes (no more than 16 bytes) past the end of the page buffer. +** It will only read past the end of the page buffer, never write. This +** object is positioned immediately after the page buffer to serve as an +** overrun area, so that overreads are harmless. +** +** Variables isBulkLocal and isAnchor were once type "u8". That works, ** but causes a 2-byte gap in the structure for most architectures (since ** pointers must be either 4 or 8-byte aligned). As this structure is located ** in memory directly after the associated page data, if the database is ** corrupt, code at the b-tree layer may overread the page buffer and ** read part of this structure before the corruption is detected. This -** can cause a valgrind error if the unitialized gap is accessed. Using u16 -** ensures there is no such gap, and therefore no bytes of unitialized memory -** in the structure. +** can cause a valgrind error if the uninitialized gap is accessed. Using u16 +** ensures there is no such gap, and therefore no bytes of uninitialized +** memory in the structure. +** +** The pLruNext and pLruPrev pointers form a double-linked circular list +** of all pages that are unpinned. The PGroup.lru element (which should be +** the only element on the list with PgHdr1.isAnchor set to 1) forms the +** beginning and the end of the list. */ struct PgHdr1 { - sqlite3_pcache_page page; /* Base class. Must be first. 
pBuf & pExtra */ - unsigned int iKey; /* Key value (page number) */ - u16 isBulkLocal; /* This page from bulk local storage */ - u16 isAnchor; /* This is the PGroup.lru element */ - PgHdr1 *pNext; /* Next in hash table chain */ - PCache1 *pCache; /* Cache that currently owns this page */ - PgHdr1 *pLruNext; /* Next in LRU list of unpinned pages */ - PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ - /* NB: pLruPrev is only valid if pLruNext!=0 */ + sqlite3_pcache_page page; /* Base class. Must be first. pBuf & pExtra */ + unsigned int iKey; /* Key value (page number) */ + u16 isBulkLocal; /* This page from bulk local storage */ + u16 isAnchor; /* This is the PGroup.lru element */ + PgHdr1 *pNext; /* Next in hash table chain */ + PCache1 *pCache; /* Cache that currently owns this page */ + PgHdr1 *pLruNext; /* Next in circular LRU list of unpinned pages */ + PgHdr1 *pLruPrev; /* Previous in LRU list of unpinned pages */ + /* NB: pLruPrev is only valid if pLruNext!=0 */ }; /* @@ -52511,25 +55407,13 @@ static PgHdr1 *pcache1AllocPage(PCache1 *pCache, int benignMalloc){ pcache1LeaveMutex(pCache->pGroup); #endif if( benignMalloc ){ sqlite3BeginBenignMalloc(); } -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - pPg = pcache1Alloc(pCache->szPage); - p = sqlite3Malloc(sizeof(PgHdr1) + pCache->szExtra); - if( !pPg || !p ){ - pcache1Free(pPg); - sqlite3_free(p); - pPg = 0; - } -#else pPg = pcache1Alloc(pCache->szAlloc); -#endif if( benignMalloc ){ sqlite3EndBenignMalloc(); } #ifdef SQLITE_ENABLE_MEMORY_MANAGEMENT pcache1EnterMutex(pCache->pGroup); #endif if( pPg==0 ) return 0; -#ifndef SQLITE_PCACHE_SEPARATE_HEADER p = (PgHdr1 *)&((u8 *)pPg)[pCache->szPage]; -#endif p->page.pBuf = pPg; p->page.pExtra = &p[1]; p->isBulkLocal = 0; @@ -52553,9 +55437,6 @@ static void pcache1FreePage(PgHdr1 *p){ pCache->pFree = p; }else{ pcache1Free(p->page.pBuf); -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - sqlite3_free(p); -#endif } (*pCache->pnPurgeable)--; } @@ -53322,9 +56203,6 @@ SQLITE_PRIVATE int sqlite3PcacheReleaseMemory(int nReq){ && p->isAnchor==0 ){ nFree += pcache1MemSize(p->page.pBuf); -#ifdef SQLITE_PCACHE_SEPARATE_HEADER - nFree += sqlite3MemSize(p); -#endif assert( PAGE_IS_UNPINNED(p) ); pcache1PinPage(p); pcache1RemoveFromHash(p, 1); @@ -53405,7 +56283,7 @@ SQLITE_PRIVATE void sqlite3PcacheStats( ** The TEST primitive includes a "batch" number. The TEST primitive ** will only see elements that were inserted before the last change ** in the batch number. In other words, if an INSERT occurs between -** two TESTs where the TESTs have the same batch nubmer, then the +** two TESTs where the TESTs have the same batch number, then the ** value added by the INSERT will not be visible to the second TEST. ** The initial batch number is zero, so if the very first TEST contains ** a non-zero batch number, it will see all prior INSERTs. 
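The batch rule described above is subtle, so here is a hedged, self-contained model of just that rule (a toy array-based structure with hypothetical names ToyRowSet, toyInsert and toyTest; not the real RowSet): a TEST consults only the elements inserted before the most recent change of batch number.

#include <assert.h>

#define MAXN 16
typedef struct {
  long long a[MAXN];
  int n;           /* Total elements inserted so far */
  int nVisible;    /* Elements inserted before the last batch change */
  int iBatch;      /* Batch number seen by the previous TEST */
} ToyRowSet;

static void toyInsert(ToyRowSet *p, long long v){
  assert( p->n<MAXN );
  p->a[p->n++] = v;
}

static int toyTest(ToyRowSet *p, int iBatch, long long v){
  int i;
  if( iBatch!=p->iBatch ){      /* Batch changed: snapshot the set */
    p->iBatch = iBatch;
    p->nVisible = p->n;
  }
  for(i=0; i<p->nVisible; i++){
    if( p->a[i]==v ) return 1;
  }
  return 0;
}

int main(void){
  ToyRowSet rs = {{0}, 0, 0, 0};
  toyInsert(&rs, 7);
  assert( toyTest(&rs, 1, 7)==1 );  /* First TEST of batch 1 sees 7 */
  toyInsert(&rs, 8);
  assert( toyTest(&rs, 1, 8)==0 );  /* Same batch: 8 not yet visible */
  assert( toyTest(&rs, 2, 8)==1 );  /* New batch: 8 becomes visible */
  return 0;
}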
@@ -53937,6 +56815,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64 # define sqlite3WalFramesize(z) 0 # define sqlite3WalFindFrame(x,y,z) 0 # define sqlite3WalFile(x) 0 +# undef SQLITE_USE_SEH #else #define WAL_SAVEPOINT_NDATA 4 @@ -54043,6 +56922,10 @@ SQLITE_PRIVATE int sqlite3WalWriteLock(Wal *pWal, int bLock); SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db); #endif +#ifdef SQLITE_USE_SEH +SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal*); +#endif + #endif /* ifndef SQLITE_OMIT_WAL */ #endif /* SQLITE_WAL_H */ @@ -54328,7 +57211,7 @@ int sqlite3PagerTrace=1; /* True to enable tracing */ ** outstanding transactions have been abandoned, the pager is able to ** transition back to OPEN state, discarding the contents of the ** page-cache and any other in-memory state at the same time. Everything -** is reloaded from disk (and, if necessary, hot-journal rollback peformed) +** is reloaded from disk (and, if necessary, hot-journal rollback performed) ** when a read-transaction is next opened on the pager (transitioning ** the pager into READER state). At that point the system has recovered ** from the error. @@ -54731,7 +57614,7 @@ struct Pager { char *zJournal; /* Name of the journal file */ int (*xBusyHandler)(void*); /* Function to call when busy */ void *pBusyHandlerArg; /* Context argument for xBusyHandler */ - int aStat[4]; /* Total cache hits, misses, writes, spills */ + u32 aStat[4]; /* Total cache hits, misses, writes, spills */ #ifdef SQLITE_TEST int nRead; /* Database pages read */ #endif @@ -54874,9 +57757,8 @@ SQLITE_PRIVATE int sqlite3PagerDirectReadOk(Pager *pPager, Pgno pgno){ #ifndef SQLITE_OMIT_WAL if( pPager->pWal ){ u32 iRead = 0; - int rc; - rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); - return (rc==SQLITE_OK && iRead==0); + (void)sqlite3WalFindFrame(pPager->pWal, pgno, &iRead); + return iRead==0; } #endif return 1; @@ -55554,9 +58436,32 @@ static int writeJournalHdr(Pager *pPager){ memset(zHeader, 0, sizeof(aJournalMagic)+4); } + + /* The random check-hash initializer */ - sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + if( pPager->journalMode!=PAGER_JOURNALMODE_MEMORY ){ + sqlite3_randomness(sizeof(pPager->cksumInit), &pPager->cksumInit); + } +#ifdef SQLITE_DEBUG + else{ + /* The Pager.cksumInit variable is usually randomized above to protect + ** against there being existing records in the journal file. This is + ** dangerous, as following a crash they may be mistaken for records + ** written by the current transaction and rolled back into the database + ** file, causing corruption. The following assert statements verify + ** that this is not required in "journal_mode=memory" mode, as in that + ** case the journal file is always 0 bytes in size at this point. + ** It is advantageous to avoid the sqlite3_randomness() call if possible + ** as it takes the global PRNG mutex. */ + i64 sz = 0; + sqlite3OsFileSize(pPager->jfd, &sz); + assert( sz==0 ); + assert( pPager->journalOff==journalHdrOffset(pPager) ); + assert( sqlite3JournalIsInMemory(pPager->jfd) ); + } +#endif put32bits(&zHeader[sizeof(aJournalMagic)+4], pPager->cksumInit); + /* The initial database size */ put32bits(&zHeader[sizeof(aJournalMagic)+8], pPager->dbOrigSize); /* The assumed sector size for this process */ @@ -55736,7 +58641,7 @@ static int readJournalHdr( ** + 4 bytes: super-journal name checksum. ** + 8 bytes: aJournalMagic[]. 
** -** The super-journal page checksum is the sum of the bytes in thesuper-journal +** The super-journal page checksum is the sum of the bytes in the super-journal ** name, where each byte is interpreted as a signed 8-bit integer. ** ** If zSuper is a NULL pointer (occurs for a single database transaction), @@ -55789,7 +58694,7 @@ static int writeSuperJournal(Pager *pPager, const char *zSuper){ } pPager->journalOff += (nSuper+20); - /* If the pager is in peristent-journal mode, then the physical + /* If the pager is in persistent-journal mode, then the physical ** journal-file may extend past the end of the super-journal name ** and 8 bytes of magic data just written to the file. This is ** dangerous because the code to rollback a hot-journal file @@ -55959,7 +58864,7 @@ static void pager_unlock(Pager *pPager){ /* ** This function is called whenever an IOERR or FULL error that requires -** the pager to transition into the ERROR state may ahve occurred. +** the pager to transition into the ERROR state may have occurred. ** The first argument is a pointer to the pager structure, the second ** the error-code about to be returned by a pager API function. The ** value returned is a copy of the second argument to this function. @@ -56200,6 +59105,9 @@ static int pager_end_transaction(Pager *pPager, int hasSuper, int bCommit){ return (rc==SQLITE_OK?rc2:rc); } +/* Forward reference */ +static int pager_playback(Pager *pPager, int isHot); + /* ** Execute a rollback if a transaction is active and unlock the ** database file. @@ -56228,13 +59136,28 @@ static void pagerUnlockAndRollback(Pager *pPager){ assert( pPager->eState==PAGER_READER ); pager_end_transaction(pPager, 0, 0); } + }else if( pPager->eState==PAGER_ERROR + && pPager->journalMode==PAGER_JOURNALMODE_MEMORY + && isOpen(pPager->jfd) + ){ + /* Special case for a ROLLBACK due to I/O error with an in-memory + ** journal: We have to rollback immediately, before the journal is + ** closed, because once it is closed, all content is forgotten. */ + int errCode = pPager->errCode; + u8 eLock = pPager->eLock; + pPager->eState = PAGER_OPEN; + pPager->errCode = SQLITE_OK; + pPager->eLock = EXCLUSIVE_LOCK; + pager_playback(pPager, 1); + pPager->errCode = errCode; + pPager->eLock = eLock; } pager_unlock(pPager); } /* ** Parameter aData must point to a buffer of pPager->pageSize bytes -** of data. Compute and return a checksum based ont the contents of the +** of data. Compute and return a checksum based on the contents of the ** page of data and the current value of pPager->cksumInit. ** ** This is not a real checksum. It is really just the sum of the @@ -56733,6 +59656,8 @@ static int pager_truncate(Pager *pPager, Pgno nPage){ int rc = SQLITE_OK; assert( pPager->eState!=PAGER_ERROR ); assert( pPager->eState!=PAGER_READER ); + PAGERTRACE(("Truncate %d npage %u\n", PAGERID(pPager), nPage)); + if( isOpen(pPager->fd) && (pPager->eState>=PAGER_WRITER_DBMOD || pPager->eState==PAGER_OPEN) @@ -57063,7 +59988,7 @@ static int pager_playback(Pager *pPager, int isHot){ ** see if it is possible to delete the super-journal. 
*/ assert( zSuper==&pPager->pTmpSpace[4] ); - memset(&zSuper[-4], 0, 4); + memset(pPager->pTmpSpace, 0, 4); rc = pager_delsuper(pPager, zSuper); testcase( rc!=SQLITE_OK ); } @@ -57266,7 +60191,7 @@ static int pagerWalFrames( assert( pPager->pWal ); assert( pList ); #ifdef SQLITE_DEBUG - /* Verify that the page list is in accending order */ + /* Verify that the page list is in ascending order */ for(p=pList; p && p->pDirty; p=p->pDirty){ assert( p->pgno < p->pDirty->pgno ); } @@ -57397,7 +60322,7 @@ static int pagerPagecount(Pager *pPager, Pgno *pnPage){ #ifndef SQLITE_OMIT_WAL /* ** Check if the *-wal file that corresponds to the database opened by pPager -** exists if the database is not empy, or verify that the *-wal file does +** exists if the database is not empty, or verify that the *-wal file does ** not exist (by deleting it) if the database file is empty. ** ** If the database is not empty and the *-wal file exists, open the pager @@ -57686,7 +60611,6 @@ SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){ ** Numeric values associated with these states are OFF==1, NORMAL=2, ** and FULL=3. */ -#ifndef SQLITE_OMIT_PAGER_PRAGMAS SQLITE_PRIVATE void sqlite3PagerSetFlags( Pager *pPager, /* The pager to set safety level for */ unsigned pgFlags /* Various flags */ @@ -57721,7 +60645,6 @@ SQLITE_PRIVATE void sqlite3PagerSetFlags( pPager->doNotSpill |= SPILLFLAG_OFF; } } -#endif /* ** The following global variable is incremented whenever the library @@ -58826,11 +61749,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int rc = SQLITE_OK; /* Return code */ int tempFile = 0; /* True for temp files (incl. in-memory files) */ int memDb = 0; /* True if this is an in-memory file */ -#ifndef SQLITE_OMIT_DESERIALIZE int memJM = 0; /* Memory journal mode */ -#else -# define memJM 0 -#endif int readOnly = 0; /* True if this is a read-only file */ int journalFileSize; /* Bytes to allocate for each journal fd */ char *zPathname = 0; /* Full path to database file */ @@ -58840,7 +61759,6 @@ SQLITE_PRIVATE int sqlite3PagerOpen( u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */ const char *zUri = 0; /* URI args to copy */ int nUriByte = 1; /* Number of bytes of URI args at *zUri */ - int nUri = 0; /* Number of URI parameters */ /* Figure out how much space is required for each journal file-handle ** (there are two of them, the main journal and the sub-journal). */ @@ -58888,7 +61806,6 @@ SQLITE_PRIVATE int sqlite3PagerOpen( while( *z ){ z += strlen(z)+1; z += strlen(z)+1; - nUri++; } nUriByte = (int)(&z[1] - zUri); assert( nUriByte>=1 ); @@ -58951,12 +61868,13 @@ SQLITE_PRIVATE int sqlite3PagerOpen( ** specific formatting and order of the various filenames, so if the format ** changes here, be sure to change it there as well. 
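**
** As a rough map of that allocation (a summary of the leading terms in
** the sqlite3MallocZero() call below, including the new SQLITE_PTRSIZE
** term; not upstream text):
**
**     ROUND8(sizeof(*pPager))    Pager structure
**     ROUND8(pcacheSize)         PCache object
**     ROUND8(pVfs->szOsFile)     The main db file
**     journalFileSize * 2        The two journal files
**     SQLITE_PTRSIZE             Pointer back to the Pager, read later
**                                by sqlite3_database_file_object()
**     4                          Database prefix
**     nPathname + 1              database filename
**     nUriByte                   query parameters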
*/ + assert( SQLITE_PTRSIZE==sizeof(Pager*) ); pPtr = (u8 *)sqlite3MallocZero( ROUND8(sizeof(*pPager)) + /* Pager structure */ ROUND8(pcacheSize) + /* PCache object */ ROUND8(pVfs->szOsFile) + /* The main db file */ journalFileSize * 2 + /* The two journal files */ - sizeof(pPager) + /* Space to hold a pointer */ + SQLITE_PTRSIZE + /* Space to hold a pointer */ 4 + /* Database prefix */ nPathname + 1 + /* database filename */ nUriByte + /* query parameters */ @@ -58977,7 +61895,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( pPager->sjfd = (sqlite3_file*)pPtr; pPtr += journalFileSize; pPager->jfd = (sqlite3_file*)pPtr; pPtr += journalFileSize; assert( EIGHT_BYTE_ALIGNMENT(pPager->jfd) ); - memcpy(pPtr, &pPager, sizeof(pPager)); pPtr += sizeof(pPager); + memcpy(pPtr, &pPager, SQLITE_PTRSIZE); pPtr += SQLITE_PTRSIZE; /* Fill in the Pager.zFilename and pPager.zQueryParam fields */ pPtr += 4; /* Skip zero prefix */ @@ -59031,9 +61949,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( int fout = 0; /* VFS flags returned by xOpen() */ rc = sqlite3OsOpen(pVfs, pPager->zFilename, pPager->fd, vfsFlags, &fout); assert( !memDb ); -#ifndef SQLITE_OMIT_DESERIALIZE pPager->memVfs = memJM = (fout&SQLITE_OPEN_MEMORY)!=0; -#endif readOnly = (fout&SQLITE_OPEN_READONLY)!=0; /* If the file was successfully opened for read/write access, @@ -59144,18 +62060,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen( pPager->memDb = (u8)memDb; pPager->readOnly = (u8)readOnly; assert( useJournal || pPager->tempFile ); - pPager->noSync = pPager->tempFile; - if( pPager->noSync ){ - assert( pPager->fullSync==0 ); - assert( pPager->extraSync==0 ); - assert( pPager->syncFlags==0 ); - assert( pPager->walSyncFlags==0 ); - }else{ - pPager->fullSync = 1; - pPager->extraSync = 0; - pPager->syncFlags = SQLITE_SYNC_NORMAL; - pPager->walSyncFlags = SQLITE_SYNC_NORMAL | (SQLITE_SYNC_NORMAL<<2); - } + sqlite3PagerSetFlags(pPager, (SQLITE_DEFAULT_SYNCHRONOUS+1)|PAGER_CACHESPILL); /* pPager->pFirst = 0; */ /* pPager->pFirstSynced = 0; */ /* pPager->pLast = 0; */ @@ -59181,15 +62086,18 @@ SQLITE_PRIVATE int sqlite3PagerOpen( /* ** Return the sqlite3_file for the main database given the name -** of the corresonding WAL or Journal name as passed into +** of the corresponding WAL or Journal name as passed into ** xOpen. 
*/ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char *zName){ Pager *pPager; + const char *p; while( zName[-1]!=0 || zName[-2]!=0 || zName[-3]!=0 || zName[-4]!=0 ){ zName--; } - pPager = *(Pager**)(zName - 4 - sizeof(Pager*)); + p = zName - 4 - sizeof(Pager*); + assert( EIGHT_BYTE_ALIGNMENT(p) ); + pPager = *(Pager**)p; return pPager->fd; } @@ -59684,6 +62592,10 @@ static int getPageNormal( if( !isOpen(pPager->fd) || pPager->dbSizepPager->mxPgno ){ rc = SQLITE_FULL; + if( pgno<=pPager->dbSize ){ + sqlite3PcacheRelease(pPg); + pPg = 0; + } goto pager_acquire_err; } if( noContent ){ @@ -59824,8 +62736,20 @@ SQLITE_PRIVATE int sqlite3PagerGet( DbPage **ppPage, /* Write a pointer to the page here */ int flags /* PAGER_GET_XXX flags */ ){ - /* printf("PAGE %u\n", pgno); fflush(stdout); */ +#if 0 /* Trace page fetch by setting to 1 */ + int rc; + printf("PAGE %u\n", pgno); + fflush(stdout); + rc = pPager->xGet(pPager, pgno, ppPage, flags); + if( rc ){ + printf("PAGE %u failed with 0x%02x\n", pgno, rc); + fflush(stdout); + } + return rc; +#else + /* Normal, high-speed version of sqlite3PagerGet() */ return pPager->xGet(pPager, pgno, ppPage, flags); +#endif } /* @@ -59853,10 +62777,12 @@ SQLITE_PRIVATE DbPage *sqlite3PagerLookup(Pager *pPager, Pgno pgno){ /* ** Release a page reference. ** -** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be -** used if we know that the page being released is not the last page. +** The sqlite3PagerUnref() and sqlite3PagerUnrefNotNull() may only be used +** if we know that the page being released is not the last reference to page1. ** The btree layer always holds page1 open until the end, so these first -** to routines can be used to release any page other than BtShared.pPage1. +** two routines can be used to release any page other than BtShared.pPage1. +** The assert() at tag-20230419-2 proves that this constraint is always +** honored. ** ** Use sqlite3PagerUnrefPageOne() to release page1. 
This latter routine ** checks the total number of outstanding pages and if the number of @@ -59872,7 +62798,7 @@ SQLITE_PRIVATE void sqlite3PagerUnrefNotNull(DbPage *pPg){ sqlite3PcacheRelease(pPg); } /* Do not use this routine to release the last reference to page1 */ - assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); + assert( sqlite3PcacheRefCount(pPager->pPCache)>0 ); /* tag-20230419-2 */ } SQLITE_PRIVATE void sqlite3PagerUnref(DbPage *pPg){ if( pPg ) sqlite3PagerUnrefNotNull(pPg); @@ -59938,6 +62864,7 @@ static int pager_open_journal(Pager *pPager){ if( pPager->tempFile ){ flags |= (SQLITE_OPEN_DELETEONCLOSE|SQLITE_OPEN_TEMP_JOURNAL); + flags |= SQLITE_OPEN_EXCLUSIVE; nSpill = sqlite3Config.nStmtSpill; }else{ flags |= SQLITE_OPEN_MAIN_JOURNAL; @@ -60420,7 +63347,7 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){ # define DIRECT_MODE isDirectMode #endif - if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){ + if( !pPager->changeCountDone && pPager->dbSize>0 ){ PgHdr *pPgHdr; /* Reference to page 1 */ assert( !pPager->tempFile && isOpen(pPager->fd) ); @@ -60698,6 +63625,13 @@ SQLITE_PRIVATE int sqlite3PagerCommitPhaseOne( rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_BEGIN_ATOMIC_WRITE, 0); if( rc==SQLITE_OK ){ rc = pager_write_pagelist(pPager, pList); + if( rc==SQLITE_OK && pPager->dbSize>pPager->dbFileSize ){ + char *pTmp = pPager->pTmpSpace; + int szPage = (int)pPager->pageSize; + memset(pTmp, 0, szPage); + rc = sqlite3OsWrite(pPager->fd, pTmp, szPage, + ((i64)pPager->dbSize*pPager->pageSize)-szPage); + } if( rc==SQLITE_OK ){ rc = sqlite3OsFileControl(fd, SQLITE_FCNTL_COMMIT_ATOMIC_WRITE, 0); } @@ -60932,11 +63866,11 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ a[3] = pPager->eState==PAGER_OPEN ? -1 : (int) pPager->dbSize; a[4] = pPager->eState; a[5] = pPager->errCode; - a[6] = pPager->aStat[PAGER_STAT_HIT]; - a[7] = pPager->aStat[PAGER_STAT_MISS]; + a[6] = (int)pPager->aStat[PAGER_STAT_HIT] & 0x7fffffff; + a[7] = (int)pPager->aStat[PAGER_STAT_MISS] & 0x7fffffff; a[8] = 0; /* Used to be pPager->nOvfl */ a[9] = pPager->nRead; - a[10] = pPager->aStat[PAGER_STAT_WRITE]; + a[10] = (int)pPager->aStat[PAGER_STAT_WRITE] & 0x7fffffff; return a; } #endif @@ -60952,7 +63886,7 @@ SQLITE_PRIVATE int *sqlite3PagerStats(Pager *pPager){ ** reset parameter is non-zero, the cache hit or miss count is zeroed before ** returning. */ -SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, int *pnVal){ +SQLITE_PRIVATE void sqlite3PagerCacheStat(Pager *pPager, int eStat, int reset, u64 *pnVal){ assert( eStat==SQLITE_DBSTATUS_CACHE_HIT || eStat==SQLITE_DBSTATUS_CACHE_MISS @@ -61160,7 +64094,11 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){ */ SQLITE_PRIVATE const char *sqlite3PagerFilename(const Pager *pPager, int nullIfMemDb){ static const char zFake[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; - return (nullIfMemDb && pPager->memDb) ? &zFake[4] : pPager->zFilename; + if( nullIfMemDb && (pPager->memDb || sqlite3IsMemdb(pPager->pVfs)) ){ + return &zFake[4]; + }else{ + return pPager->zFilename; + } } /* @@ -61184,7 +64122,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3PagerFile(Pager *pPager){ ** This will be either the rollback journal or the WAL file. */ SQLITE_PRIVATE sqlite3_file *sqlite3PagerJrnlFile(Pager *pPager){ -#if SQLITE_OMIT_WAL +#ifdef SQLITE_OMIT_WAL return pPager->jfd; #else return pPager->pWal ? 
sqlite3WalFile(pPager->pWal) : pPager->jfd; @@ -61503,7 +64441,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ assert( pPager->eState!=PAGER_ERROR ); pPager->journalMode = (u8)eMode; - /* When transistioning from TRUNCATE or PERSIST to any other journal + /* When transitioning from TRUNCATE or PERSIST to any other journal ** mode except WAL, unless the pager is in locking_mode=exclusive mode, ** delete the journal file. */ @@ -61548,7 +64486,7 @@ SQLITE_PRIVATE int sqlite3PagerSetJournalMode(Pager *pPager, int eMode){ } assert( state==pPager->eState ); } - }else if( eMode==PAGER_JOURNALMODE_OFF ){ + }else if( eMode==PAGER_JOURNALMODE_OFF || eMode==PAGER_JOURNALMODE_MEMORY ){ sqlite3OsClose(pPager->jfd); } } @@ -61572,7 +64510,7 @@ SQLITE_PRIVATE int sqlite3PagerGetJournalMode(Pager *pPager){ SQLITE_PRIVATE int sqlite3PagerOkToChangeJournalMode(Pager *pPager){ assert( assert_pager_state(pPager) ); if( pPager->eState>=PAGER_WRITER_CACHEMOD ) return 0; - if( isOpen(pPager->jfd) && pPager->journalOff>0 ) return 0; + if( NEVER(isOpen(pPager->jfd) && pPager->journalOff>0) ) return 0; return 1; } @@ -61670,13 +64608,15 @@ SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager){ */ static int pagerExclusiveLock(Pager *pPager){ int rc; /* Return code */ + u8 eOrigLock; /* Original lock */ - assert( pPager->eLock==SHARED_LOCK || pPager->eLock==EXCLUSIVE_LOCK ); + assert( pPager->eLock>=SHARED_LOCK ); + eOrigLock = pPager->eLock; rc = pagerLockDb(pPager, EXCLUSIVE_LOCK); if( rc!=SQLITE_OK ){ /* If the attempt to grab the exclusive lock failed, release the ** pending lock that may have been obtained instead. */ - pagerUnlockDb(pPager, SHARED_LOCK); + pagerUnlockDb(pPager, eOrigLock); } return rc; @@ -61929,6 +64869,12 @@ SQLITE_PRIVATE int sqlite3PagerWalFramesize(Pager *pPager){ } #endif +#if defined(SQLITE_USE_SEH) && !defined(SQLITE_OMIT_WAL) +SQLITE_PRIVATE int sqlite3PagerWalSystemErrno(Pager *pPager){ + return sqlite3WalSystemErrno(pPager->pWal); +} +#endif + #endif /* SQLITE_OMIT_DISKIO */ /* BEGIN SQLCIPHER */ @@ -62240,7 +65186,7 @@ SQLITE_PRIVATE int sqlite3WalTrace = 0; ** ** Technically, the various VFSes are free to implement these locks however ** they see fit. However, compatibility is encouraged so that VFSes can -** interoperate. The standard implemention used on both unix and windows +** interoperate. The standard implementation used on both unix and windows ** is for the index number to indicate a byte offset into the ** WalCkptInfo.aLock[] array in the wal-index header. In other words, all ** locks are on the shm file. The WALINDEX_LOCK_OFFSET constant (which @@ -62316,7 +65262,7 @@ struct WalIndexHdr { ** the mxFrame for that reader. The value READMARK_NOT_USED (0xffffffff) ** for any aReadMark[] means that entry is unused. aReadMark[0] is ** a special case; its value is never used and it exists as a place-holder -** to avoid having to offset aReadMark[] indexs by one. Readers holding +** to avoid having to offset aReadMark[] indexes by one. Readers holding ** WAL_READ_LOCK(0) always ignore the entire WAL and read all content ** directly from the database. 
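**
** A hedged summary of how these marks are used (the authoritative
** logic is in walTryBeginRead(), later in this file): a reader picks
** the largest aReadMark[i] that does not exceed the mxFrame it wants
** to snapshot, takes a SHARED lock on WAL_READ_LOCK(i), and then uses
** WAL frames up to and including that mark. The checkpointer, in
** turn, must not backfill past any mark still held by a reader.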
** @@ -62484,7 +65430,15 @@ struct Wal { u32 iReCksum; /* On commit, recalculate checksums from here */ const char *zWalName; /* Name of WAL file */ u32 nCkpt; /* Checkpoint sequence counter in the wal-header */ +#ifdef SQLITE_USE_SEH + u32 lockMask; /* Mask of locks held */ + void *pFree; /* Pointer to sqlite3_free() if exception thrown */ + u32 *pWiValue; /* Value to write into apWiData[iWiPg] */ + int iWiPg; /* Write pWiValue into apWiData[iWiPg] */ + int iSysErrno; /* System error code following exception */ +#endif #ifdef SQLITE_DEBUG + int nSehTry; /* Number of nested SEH_TRY{} blocks */ u8 lockError; /* True if a locking error has occurred */ #endif #ifdef SQLITE_ENABLE_SNAPSHOT @@ -62566,6 +65520,113 @@ struct WalIterator { sizeof(ht_slot)*HASHTABLE_NSLOT + HASHTABLE_NPAGE*sizeof(u32) \ ) +/* +** Structured Exception Handling (SEH) is a Windows-specific technique +** for catching exceptions raised while accessing memory-mapped files. +** +** The -DSQLITE_USE_SEH compile-time option means to use SEH to catch and +** deal with system-level errors that arise during WAL -shm file processing. +** Without this compile-time option, any system-level faults that appear +** while accessing the memory-mapped -shm file will cause a process-wide +** signal to be deliver, which will more than likely cause the entire +** process to exit. +*/ +#ifdef SQLITE_USE_SEH +#include + +/* Beginning of a block of code in which an exception might occur */ +# define SEH_TRY __try { \ + assert( walAssertLockmask(pWal) && pWal->nSehTry==0 ); \ + VVA_ONLY(pWal->nSehTry++); + +/* The end of a block of code in which an exception might occur */ +# define SEH_EXCEPT(X) \ + VVA_ONLY(pWal->nSehTry--); \ + assert( pWal->nSehTry==0 ); \ + } __except( sehExceptionFilter(pWal, GetExceptionCode(), GetExceptionInformation() ) ){ X } + +/* Simulate a memory-mapping fault in the -shm file for testing purposes */ +# define SEH_INJECT_FAULT sehInjectFault(pWal) + +/* +** The second argument is the return value of GetExceptionCode() for the +** current exception. Return EXCEPTION_EXECUTE_HANDLER if the exception code +** indicates that the exception may have been caused by accessing the *-shm +** file mapping. Or EXCEPTION_CONTINUE_SEARCH otherwise. +*/ +static int sehExceptionFilter(Wal *pWal, int eCode, EXCEPTION_POINTERS *p){ + VVA_ONLY(pWal->nSehTry--); + if( eCode==EXCEPTION_IN_PAGE_ERROR ){ + if( p && p->ExceptionRecord && p->ExceptionRecord->NumberParameters>=3 ){ + /* From MSDN: For this type of exception, the first element of the + ** ExceptionInformation[] array is a read-write flag - 0 if the exception + ** was thrown while reading, 1 if while writing. The second element is + ** the virtual address being accessed. The "third array element specifies + ** the underlying NTSTATUS code that resulted in the exception". */ + pWal->iSysErrno = (int)p->ExceptionRecord->ExceptionInformation[2]; + } + return EXCEPTION_EXECUTE_HANDLER; + } + return EXCEPTION_CONTINUE_SEARCH; +} + +/* +** If one is configured, invoke the xTestCallback callback with 650 as +** the argument. If it returns true, throw the same exception that is +** thrown by the system if the *-shm file mapping is accessed after it +** has been invalidated. 
+*/ +static void sehInjectFault(Wal *pWal){ + int res; + assert( pWal->nSehTry>0 ); + + res = sqlite3FaultSim(650); + if( res!=0 ){ + ULONG_PTR aArg[3]; + aArg[0] = 0; + aArg[1] = 0; + aArg[2] = (ULONG_PTR)res; + RaiseException(EXCEPTION_IN_PAGE_ERROR, 0, 3, (const ULONG_PTR*)aArg); + } +} + +/* +** There are two ways to use this macro. To set a pointer to be freed +** if an exception is thrown: +** +** SEH_FREE_ON_ERROR(0, pPtr); +** +** and to cancel the same: +** +** SEH_FREE_ON_ERROR(pPtr, 0); +** +** In the first case, there must not already be a pointer registered to +** be freed. In the second case, pPtr must be the registered pointer. +*/ +#define SEH_FREE_ON_ERROR(X,Y) \ + assert( (X==0 || Y==0) && pWal->pFree==X ); pWal->pFree = Y + +/* +** There are two ways to use this macro. To arrange for pWal->apWiData[iPg] +** to be set to pValue if an exception is thrown: +** +** SEH_SET_ON_ERROR(iPg, pValue); +** +** and to cancel the same: +** +** SEH_SET_ON_ERROR(0, 0); +*/ +#define SEH_SET_ON_ERROR(X,Y) pWal->iWiPg = X; pWal->pWiValue = Y + +#else +# define SEH_TRY VVA_ONLY(pWal->nSehTry++); +# define SEH_EXCEPT(X) VVA_ONLY(pWal->nSehTry--); assert( pWal->nSehTry==0 ); +# define SEH_INJECT_FAULT assert( pWal->nSehTry>0 ); +# define SEH_FREE_ON_ERROR(X,Y) +# define SEH_SET_ON_ERROR(X,Y) +#endif /* ifdef SQLITE_USE_SEH */ + + /* ** Obtain a pointer to the iPage'th page of the wal-index. The wal-index ** is broken into pages of WALINDEX_PGSZ bytes. Wal-index pages are @@ -62638,6 +65699,7 @@ static int walIndexPage( int iPage, /* The page we seek */ volatile u32 **ppPage /* Write the page pointer here */ ){ + SEH_INJECT_FAULT; if( pWal->nWiData<=iPage || (*ppPage = pWal->apWiData[iPage])==0 ){ return walIndexPageRealloc(pWal, iPage, ppPage); } @@ -62649,6 +65711,7 @@ static int walIndexPage( */ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); + SEH_INJECT_FAULT; return (volatile WalCkptInfo*)&(pWal->apWiData[0][sizeof(WalIndexHdr)/2]); } @@ -62657,6 +65720,7 @@ static volatile WalCkptInfo *walCkptInfo(Wal *pWal){ */ static volatile WalIndexHdr *walIndexHdr(Wal *pWal){ assert( pWal->nWiData>0 && pWal->apWiData[0] ); + SEH_INJECT_FAULT; return (volatile WalIndexHdr*)pWal->apWiData[0]; } @@ -62702,19 +65766,40 @@ static void walChecksumBytes( assert( nByte>=8 ); assert( (nByte&0x00000007)==0 ); assert( nByte<=65536 ); + assert( nByte%4==0 ); - if( nativeCksum ){ + if( !nativeCksum ){ do { + s1 += BYTESWAP32(aData[0]) + s2; + s2 += BYTESWAP32(aData[1]) + s1; + aData += 2; + }while( aDatalockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); ) +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_OK ) pWal->lockMask |= (1 << lockIdx); +#endif return rc; } static void walUnlockShared(Wal *pWal, int lockIdx){ if( pWal->exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, 1, SQLITE_SHM_UNLOCK | SQLITE_SHM_SHARED); +#ifdef SQLITE_USE_SEH + pWal->lockMask &= ~(1 << lockIdx); +#endif WALTRACE(("WAL%p: release SHARED-%s\n", pWal, walLockName(lockIdx))); } static int walLockExclusive(Wal *pWal, int lockIdx, int n){ @@ -62909,12 +66000,20 @@ static int walLockExclusive(Wal *pWal, int lockIdx, int n){ WALTRACE(("WAL%p: acquire EXCLUSIVE-%s cnt=%d %s\n", pWal, walLockName(lockIdx), n, rc ? 
"failed" : "ok")); VVA_ONLY( pWal->lockError = (u8)(rc!=SQLITE_OK && (rc&0xFF)!=SQLITE_BUSY); ) +#ifdef SQLITE_USE_SEH + if( rc==SQLITE_OK ){ + pWal->lockMask |= (((1<exclusiveMode ) return; (void)sqlite3OsShmLock(pWal->pDbFd, lockIdx, n, SQLITE_SHM_UNLOCK | SQLITE_SHM_EXCLUSIVE); +#ifdef SQLITE_USE_SEH + pWal->lockMask &= ~(((1<apWiData[0][WALINDEX_HDR_SIZE/sizeof(u32) + iFrame - 1]; } @@ -63265,6 +66365,7 @@ static int walIndexRecover(Wal *pWal){ /* Malloc a buffer to read frames into. */ szFrame = szPage + WAL_FRAME_HDRSIZE; aFrame = (u8 *)sqlite3_malloc64(szFrame + WALINDEX_PGSZ); + SEH_FREE_ON_ERROR(0, aFrame); if( !aFrame ){ rc = SQLITE_NOMEM_BKPT; goto recovery_error; @@ -63283,6 +66384,7 @@ static int walIndexRecover(Wal *pWal){ rc = walIndexPage(pWal, iPg, (volatile u32**)&aShare); assert( aShare!=0 || rc!=SQLITE_OK ); if( aShare==0 ) break; + SEH_SET_ON_ERROR(iPg, aShare); pWal->apWiData[iPg] = aPrivate; for(iFrame=iFirst; iFrame<=iLast; iFrame++){ @@ -63310,6 +66412,7 @@ static int walIndexRecover(Wal *pWal){ } } pWal->apWiData[iPg] = aShare; + SEH_SET_ON_ERROR(0,0); nHdr = (iPg==0 ? WALINDEX_HDR_SIZE : 0); nHdr32 = nHdr / sizeof(u32); #ifndef SQLITE_SAFER_WALINDEX_RECOVERY @@ -63340,9 +66443,11 @@ static int walIndexRecover(Wal *pWal){ } } #endif + SEH_INJECT_FAULT; if( iFrame<=iLast ) break; } + SEH_FREE_ON_ERROR(aFrame, 0); sqlite3_free(aFrame); } @@ -63370,6 +66475,7 @@ static int walIndexRecover(Wal *pWal){ }else{ pInfo->aReadMark[i] = READMARK_NOT_USED; } + SEH_INJECT_FAULT; walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); }else if( rc!=SQLITE_BUSY ){ goto recovery_error; @@ -63527,7 +66633,7 @@ SQLITE_PRIVATE int sqlite3WalOpen( } /* -** Change the size to which the WAL file is trucated on each reset. +** Change the size to which the WAL file is truncated on each reset. */ SQLITE_PRIVATE void sqlite3WalLimit(Wal *pWal, i64 iLimit){ if( pWal ) pWal->mxWalSize = iLimit; @@ -63753,23 +66859,16 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ nByte = sizeof(WalIterator) + (nSegment-1)*sizeof(struct WalSegment) + iLast*sizeof(ht_slot); - p = (WalIterator *)sqlite3_malloc64(nByte); + p = (WalIterator *)sqlite3_malloc64(nByte + + sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) + ); if( !p ){ return SQLITE_NOMEM_BKPT; } memset(p, 0, nByte); p->nSegment = nSegment; - - /* Allocate temporary space used by the merge-sort routine. This block - ** of memory will be freed before this function returns. - */ - aTmp = (ht_slot *)sqlite3_malloc64( - sizeof(ht_slot) * (iLast>HASHTABLE_NPAGE?HASHTABLE_NPAGE:iLast) - ); - if( !aTmp ){ - rc = SQLITE_NOMEM_BKPT; - } - + aTmp = (ht_slot*)&(((u8*)p)[nByte]); + SEH_FREE_ON_ERROR(0, p); for(i=walFramePage(nBackfill+1); rc==SQLITE_OK && iaSegment[i].aPgno = (u32 *)sLoc.aPgno; } } - sqlite3_free(aTmp); - if( rc!=SQLITE_OK ){ + SEH_FREE_ON_ERROR(p, 0); walIteratorFree(p); p = 0; } @@ -63808,6 +66906,19 @@ static int walIteratorInit(Wal *pWal, u32 nBackfill, WalIterator **pp){ } #ifdef SQLITE_ENABLE_SETLK_TIMEOUT + + +/* +** Attempt to enable blocking locks that block for nMs ms. Return 1 if +** blocking locks are successfully enabled, or 0 otherwise. +*/ +static int walEnableBlockingMs(Wal *pWal, int nMs){ + int rc = sqlite3OsFileControl( + pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&nMs + ); + return (rc==SQLITE_OK); +} + /* ** Attempt to enable blocking locks. 
Blocking locks are enabled only if (a) ** they are supported by the VFS, and (b) the database handle is configured @@ -63819,11 +66930,7 @@ static int walEnableBlocking(Wal *pWal){ if( pWal->db ){ int tmout = pWal->db->busyTimeout; if( tmout ){ - int rc; - rc = sqlite3OsFileControl( - pWal->pDbFd, SQLITE_FCNTL_LOCK_TIMEOUT, (void*)&tmout - ); - res = (rc==SQLITE_OK); + res = walEnableBlockingMs(pWal, tmout); } } return res; @@ -63872,20 +66979,10 @@ SQLITE_PRIVATE void sqlite3WalDb(Wal *pWal, sqlite3 *db){ pWal->db = db; } -/* -** Take an exclusive WRITE lock. Blocking if so configured. -*/ -static int walLockWriter(Wal *pWal){ - int rc; - walEnableBlocking(pWal); - rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1); - walDisableBlocking(pWal); - return rc; -} #else # define walEnableBlocking(x) 0 # define walDisableBlocking(x) -# define walLockWriter(pWal) walLockExclusive((pWal), WAL_WRITE_LOCK, 1) +# define walEnableBlockingMs(pWal, ms) 0 # define sqlite3WalDb(pWal, db) #endif /* ifdef SQLITE_ENABLE_SETLK_TIMEOUT */ @@ -64025,13 +67122,13 @@ static int walCheckpoint( mxSafeFrame = pWal->hdr.mxFrame; mxPage = pWal->hdr.nPage; for(i=1; iaReadMark+i); + u32 y = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; if( mxSafeFrame>y ){ assert( y<=pWal->hdr.mxFrame ); rc = walBusyLock(pWal, xBusy, pBusyArg, WAL_READ_LOCK(i), 1); if( rc==SQLITE_OK ){ u32 iMark = (i==1 ? mxSafeFrame : READMARK_NOT_USED); - AtomicStore(pInfo->aReadMark+i, iMark); + AtomicStore(pInfo->aReadMark+i, iMark); SEH_INJECT_FAULT; walUnlockExclusive(pWal, WAL_READ_LOCK(i), 1); }else if( rc==SQLITE_BUSY ){ mxSafeFrame = y; @@ -64052,8 +67149,7 @@ static int walCheckpoint( && (rc = walBusyLock(pWal,xBusy,pBusyArg,WAL_READ_LOCK(0),1))==SQLITE_OK ){ u32 nBackfill = pInfo->nBackfill; - - pInfo->nBackfillAttempted = mxSafeFrame; + pInfo->nBackfillAttempted = mxSafeFrame; SEH_INJECT_FAULT; /* Sync the WAL to disk */ rc = sqlite3OsSync(pWal->pWalFd, CKPT_SYNC_FLAGS(sync_flags)); @@ -64084,6 +67180,7 @@ static int walCheckpoint( while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){ i64 iOffset; assert( walFramePgno(pWal, iFrame)==iDbpage ); + SEH_INJECT_FAULT; if( AtomicLoad(&db->u1.isInterrupted) ){ rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT; break; @@ -64113,7 +67210,7 @@ static int walCheckpoint( } } if( rc==SQLITE_OK ){ - AtomicStore(&pInfo->nBackfill, mxSafeFrame); + AtomicStore(&pInfo->nBackfill, mxSafeFrame); SEH_INJECT_FAULT; } } @@ -64135,6 +67232,7 @@ static int walCheckpoint( */ if( rc==SQLITE_OK && eMode!=SQLITE_CHECKPOINT_PASSIVE ){ assert( pWal->writeLock ); + SEH_INJECT_FAULT; if( pInfo->nBackfillhdr.mxFrame ){ rc = SQLITE_BUSY; }else if( eMode>=SQLITE_CHECKPOINT_RESTART ){ @@ -64166,6 +67264,7 @@ static int walCheckpoint( } walcheckpoint_out: + SEH_FREE_ON_ERROR(pIter, 0); walIteratorFree(pIter); return rc; } @@ -64188,6 +67287,93 @@ static void walLimitSize(Wal *pWal, i64 nMax){ } } +#ifdef SQLITE_USE_SEH +/* +** This is the "standard" exception handler used in a few places to handle +** an exception thrown by reading from the *-shm mapping after it has become +** invalid in SQLITE_USE_SEH builds. It is used as follows: +** +** SEH_TRY { ... } +** SEH_EXCEPT( rc = walHandleException(pWal); ) +** +** This function does three things: +** +** 1) Determines the locks that should be held, based on the contents of +** the Wal.readLock, Wal.writeLock and Wal.ckptLock variables. 
All other
+**      held locks are assumed to be transient locks that would have been
+**      released had the exception not been thrown and are dropped.
+**
+**   2) Frees the pointer at Wal.pFree, if any, using sqlite3_free().
+**
+**   3) Sets pWal->apWiData[pWal->iWiPg] to pWal->pWiValue if not NULL.
+**
+**   4) Returns SQLITE_IOERR.
+*/
+static int walHandleException(Wal *pWal){
+  if( pWal->exclusiveMode==0 ){
+    static const int S = 1;
+    static const int E = (1<<SQLITE_SHM_NLOCK);
+    int ii;
+    u32 mUnlock = pWal->lockMask & ~(
+        (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+      | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+      | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+    );
+    for(ii=0; ii<SQLITE_SHM_NLOCK; ii++){
+      if( (S<<ii) & mUnlock ) walUnlockShared(pWal, ii);
+      if( (E<<ii) & mUnlock ) walUnlockExclusive(pWal, ii, 1);
+    }
+  }
+  sqlite3_free(pWal->pFree);
+  pWal->pFree = 0;
+  if( pWal->pWiValue ){
+    pWal->apWiData[pWal->iWiPg] = pWal->pWiValue;
+    pWal->pWiValue = 0;
+  }
+  return SQLITE_IOERR_IN_PAGE;
+}
+
+/*
+** Assert that the Wal.lockMask mask, which indicates the locks held
+** by the connection, is consistent with the Wal.readLock, Wal.writeLock
+** and Wal.ckptLock variables. To be used as:
+**
+**   assert( walAssertLockmask(pWal) );
+*/
+static int walAssertLockmask(Wal *pWal){
+  if( pWal->exclusiveMode==0 ){
+    static const int S = 1;
+    static const int E = (1<<SQLITE_SHM_NLOCK);
+    u32 mExpect = (
+        (pWal->readLock<0 ? 0 : (S << WAL_READ_LOCK(pWal->readLock)))
+      | (pWal->writeLock ? (E << WAL_WRITE_LOCK) : 0)
+      | (pWal->ckptLock ? (E << WAL_CKPT_LOCK) : 0)
+#ifdef SQLITE_ENABLE_SNAPSHOT
+      | (pWal->pSnapshot ? (pWal->lockMask & (1 << WAL_CKPT_LOCK)) : 0)
+#endif
+    );
+    assert( mExpect==pWal->lockMask );
+  }
+  return 1;
+}
+
+/*
+** Return and zero the "system error" field set when an
+** EXCEPTION_IN_PAGE_ERROR exception is caught.
+*/
+SQLITE_PRIVATE int sqlite3WalSystemErrno(Wal *pWal){
+  int iRet = 0;
+  if( pWal ){
+    iRet = pWal->iSysErrno;
+    pWal->iSysErrno = 0;
+  }
+  return iRet;
+}
+
+#else
+# define walAssertLockmask(x) 1
+#endif /* ifdef SQLITE_USE_SEH */
+
 /*
 ** Close a connection to a log file.
 */
@@ -64202,6 +67388,8 @@ SQLITE_PRIVATE int sqlite3WalClose(
   if( pWal ){
     int isDelete = 0;             /* True to unlink wal and wal-index files */

+    assert( walAssertLockmask(pWal) );
+
     /* If an EXCLUSIVE lock can be obtained on the database file (using the
     ** ordinary, rollback-mode locking methods, this guarantees that the
     ** connection associated with this log file is the only connection to
@@ -64226,7 +67414,7 @@ SQLITE_PRIVATE int sqlite3WalClose(
       );
       if( bPersist!=1 ){
         /* Try to delete the WAL file if the checkpoint completed and
-        ** fsyned (rc==SQLITE_OK) and if we are not in persistent-wal
+        ** fsynced (rc==SQLITE_OK) and if we are not in persistent-wal
         ** mode (!bPersist) */
         isDelete = 1;
       }else if( pWal->mxWalSize>=0 ){
@@ -64293,7 +67481,7 @@ static SQLITE_NO_TSAN int walIndexTryHdr(Wal *pWal, int *pChanged){
   ** give false-positive warnings about these accesses because the tools do not
   ** account for the double-read and the memory barrier. The use of mutexes
   ** here would be problematic as the memory being accessed is potentially
-  ** shared among multiple processes and not all mutex implementions work
+  ** shared among multiple processes and not all mutex implementations work
   ** reliably in that environment.
*/ aHdr = walIndexHdr(pWal); @@ -64395,7 +67583,9 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ } }else{ int bWriteLock = pWal->writeLock; - if( bWriteLock || SQLITE_OK==(rc = walLockWriter(pWal)) ){ + if( bWriteLock + || SQLITE_OK==(rc = walLockExclusive(pWal, WAL_WRITE_LOCK, 1)) + ){ pWal->writeLock = 1; if( SQLITE_OK==(rc = walIndexPage(pWal, 0, &page0)) ){ badHdr = walIndexTryHdr(pWal, pChanged); @@ -64403,7 +67593,8 @@ static int walIndexReadHdr(Wal *pWal, int *pChanged){ /* If the wal-index header is still malformed even while holding ** a WRITE lock, it can only mean that the header is corrupted and ** needs to be reconstructed. So run recovery to do exactly that. - */ + ** Disable blocking locks first. */ + walDisableBlocking(pWal); rc = walIndexRecover(pWal); *pChanged = 1; } @@ -64613,6 +67804,37 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ return rc; } +/* +** The final argument passed to walTryBeginRead() is of type (int*). The +** caller should invoke walTryBeginRead as follows: +** +** int cnt = 0; +** do { +** rc = walTryBeginRead(..., &cnt); +** }while( rc==WAL_RETRY ); +** +** The final value of "cnt" is of no use to the caller. It is used by +** the implementation of walTryBeginRead() as follows: +** +** + Each time walTryBeginRead() is called, it is incremented. Once +** it reaches WAL_RETRY_PROTOCOL_LIMIT - indicating that walTryBeginRead() +** has many times been invoked and failed with WAL_RETRY - walTryBeginRead() +** returns SQLITE_PROTOCOL. +** +** + If SQLITE_ENABLE_SETLK_TIMEOUT is defined and walTryBeginRead() failed +** because a blocking lock timed out (SQLITE_BUSY_TIMEOUT from the OS +** layer), the WAL_RETRY_BLOCKED_MASK bit is set in "cnt". In this case +** the next invocation of walTryBeginRead() may omit an expected call to +** sqlite3OsSleep(). There has already been a delay when the previous call +** waited on a lock. +*/ +#define WAL_RETRY_PROTOCOL_LIMIT 100 +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT +# define WAL_RETRY_BLOCKED_MASK 0x10000000 +#else +# define WAL_RETRY_BLOCKED_MASK 0 +#endif + /* ** Attempt to start a read transaction. This might fail due to a race or ** other transient condition. When that happens, it returns WAL_RETRY to @@ -64663,13 +67885,16 @@ static int walBeginShmUnreliable(Wal *pWal, int *pChanged){ ** so it takes care to hold an exclusive lock on the corresponding ** WAL_READ_LOCK() while changing values. */ -static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ +static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int *pCnt){ volatile WalCkptInfo *pInfo; /* Checkpoint information in wal-index */ u32 mxReadMark; /* Largest aReadMark[] value */ int mxI; /* Index of largest aReadMark[] value */ int i; /* Loop counter */ int rc = SQLITE_OK; /* Return code */ u32 mxFrame; /* Wal frame to lock to */ +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + int nBlockTmout = 0; +#endif assert( pWal->readLock<0 ); /* Not currently locked */ @@ -64693,14 +67918,34 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ ** so that on the 100th (and last) RETRY we delay for 323 milliseconds. ** The total delay time before giving up is less than 10 seconds. 
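**
** To make the schedule above concrete: for cnt>=10 the delay is
** (cnt-9)*(cnt-9)*39 microseconds, so the 10th retry sleeps 39 us, the
** 50th sleeps 41*41*39 = 65559 us, and the 100th sleeps 91*91*39 =
** 322959 us, the 323 milliseconds mentioned above. Summing
** (cnt-9)*(cnt-9)*39 over cnt = 10..100 gives 39*(91*92*183)/6 =
** 9958494 us, which (plus a few 1 us yields for retries 6 through 9)
** is where the "less than 10 seconds" figure comes from.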
*/ - if( cnt>5 ){ + (*pCnt)++; + if( *pCnt>5 ){ int nDelay = 1; /* Pause time in microseconds */ - if( cnt>100 ){ + int cnt = (*pCnt & ~WAL_RETRY_BLOCKED_MASK); + if( cnt>WAL_RETRY_PROTOCOL_LIMIT ){ VVA_ONLY( pWal->lockError = 1; ) return SQLITE_PROTOCOL; } - if( cnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; + if( *pCnt>=10 ) nDelay = (cnt-9)*(cnt-9)*39; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + /* In SQLITE_ENABLE_SETLK_TIMEOUT builds, configure the file-descriptor + ** to block for locks for approximately nDelay us. This affects three + ** locks: (a) the shared lock taken on the DMS slot in os_unix.c (if + ** using os_unix.c), (b) the WRITER lock taken in walIndexReadHdr() if the + ** first attempted read fails, and (c) the shared lock taken on the + ** read-mark. + ** + ** If the previous call failed due to an SQLITE_BUSY_TIMEOUT error, + ** then sleep for the minimum of 1us. The previous call already provided + ** an extra delay while it was blocking on the lock. + */ + nBlockTmout = (nDelay+998) / 1000; + if( !useWal && walEnableBlockingMs(pWal, nBlockTmout) ){ + if( *pCnt & WAL_RETRY_BLOCKED_MASK ) nDelay = 1; + } +#endif sqlite3OsSleep(pWal->pVfs, nDelay); + *pCnt &= ~WAL_RETRY_BLOCKED_MASK; } if( !useWal ){ @@ -64708,6 +67953,13 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ if( pWal->bShmUnreliable==0 ){ rc = walIndexReadHdr(pWal, pChanged); } +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + walDisableBlocking(pWal); + if( rc==SQLITE_BUSY_TIMEOUT ){ + rc = SQLITE_BUSY; + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#endif if( rc==SQLITE_BUSY ){ /* If there is not a recovery running in another thread or process ** then convert BUSY errors to WAL_RETRY. If recovery is known to @@ -64744,6 +67996,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ assert( pWal->nWiData>0 ); assert( pWal->apWiData[0]!=0 ); pInfo = walCkptInfo(pWal); + SEH_INJECT_FAULT; if( !useWal && AtomicLoad(&pInfo->nBackfill)==pWal->hdr.mxFrame #ifdef SQLITE_ENABLE_SNAPSHOT && (pWal->pSnapshot==0 || pWal->hdr.mxFrame==0) @@ -64793,7 +68046,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ } #endif for(i=1; iaReadMark+i); + u32 thisMark = AtomicLoad(pInfo->aReadMark+i); SEH_INJECT_FAULT; if( mxReadMark<=thisMark && thisMark<=mxFrame ){ assert( thisMark!=READMARK_NOT_USED ); mxReadMark = thisMark; @@ -64821,9 +68074,19 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ return rc==SQLITE_BUSY ? WAL_RETRY : SQLITE_READONLY_CANTINIT; } + (void)walEnableBlockingMs(pWal, nBlockTmout); rc = walLockShared(pWal, WAL_READ_LOCK(mxI)); + walDisableBlocking(pWal); if( rc ){ - return rc==SQLITE_BUSY ? WAL_RETRY : rc; +#ifdef SQLITE_ENABLE_SETLK_TIMEOUT + if( rc==SQLITE_BUSY_TIMEOUT ){ + *pCnt |= WAL_RETRY_BLOCKED_MASK; + } +#else + assert( rc!=SQLITE_BUSY_TIMEOUT ); +#endif + assert( (rc&0xFF)!=SQLITE_BUSY||rc==SQLITE_BUSY||rc==SQLITE_BUSY_TIMEOUT ); + return (rc&0xFF)==SQLITE_BUSY ? WAL_RETRY : rc; } /* Now that the read-lock has been obtained, check that neither the ** value in the aReadMark[] array or the contents of the wal-index @@ -64859,7 +68122,7 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ ** we can guarantee that the checkpointer that set nBackfill could not ** see any pages past pWal->hdr.mxFrame, this problem does not come up. 
*/ - pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; + pWal->minFrame = AtomicLoad(&pInfo->nBackfill)+1; SEH_INJECT_FAULT; walShmBarrier(pWal); if( AtomicLoad(pInfo->aReadMark+mxI)!=mxReadMark || memcmp((void *)walIndexHdr(pWal), &pWal->hdr, sizeof(WalIndexHdr)) @@ -64874,6 +68137,54 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){ } #ifdef SQLITE_ENABLE_SNAPSHOT +/* +** This function does the work of sqlite3WalSnapshotRecover(). +*/ +static int walSnapshotRecover( + Wal *pWal, /* WAL handle */ + void *pBuf1, /* Temp buffer pWal->szPage bytes in size */ + void *pBuf2 /* Temp buffer pWal->szPage bytes in size */ +){ + int szPage = (int)pWal->szPage; + int rc; + i64 szDb; /* Size of db file in bytes */ + + rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); + if( rc==SQLITE_OK ){ + volatile WalCkptInfo *pInfo = walCkptInfo(pWal); + u32 i = pInfo->nBackfillAttempted; + for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){ + WalHashLoc sLoc; /* Hash table location */ + u32 pgno; /* Page number in db file */ + i64 iDbOff; /* Offset of db file entry */ + i64 iWalOff; /* Offset of wal file entry */ + + rc = walHashGet(pWal, walFramePage(i), &sLoc); + if( rc!=SQLITE_OK ) break; + assert( i - sLoc.iZero - 1 >=0 ); + pgno = sLoc.aPgno[i-sLoc.iZero-1]; + iDbOff = (i64)(pgno-1) * szPage; + + if( iDbOff+szPage<=szDb ){ + iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; + rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); + + if( rc==SQLITE_OK ){ + rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); + } + + if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ + break; + } + } + + pInfo->nBackfillAttempted = i-1; + } + } + + return rc; +} + /* ** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted ** variable so that older snapshots can be accessed. 
To do this, loop @@ -64899,50 +68210,21 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){ assert( pWal->readLock>=0 ); rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1); if( rc==SQLITE_OK ){ - volatile WalCkptInfo *pInfo = walCkptInfo(pWal); - int szPage = (int)pWal->szPage; - i64 szDb; /* Size of db file in bytes */ - - rc = sqlite3OsFileSize(pWal->pDbFd, &szDb); - if( rc==SQLITE_OK ){ - void *pBuf1 = sqlite3_malloc(szPage); - void *pBuf2 = sqlite3_malloc(szPage); - if( pBuf1==0 || pBuf2==0 ){ - rc = SQLITE_NOMEM; - }else{ - u32 i = pInfo->nBackfillAttempted; - for(i=pInfo->nBackfillAttempted; i>AtomicLoad(&pInfo->nBackfill); i--){ - WalHashLoc sLoc; /* Hash table location */ - u32 pgno; /* Page number in db file */ - i64 iDbOff; /* Offset of db file entry */ - i64 iWalOff; /* Offset of wal file entry */ - - rc = walHashGet(pWal, walFramePage(i), &sLoc); - if( rc!=SQLITE_OK ) break; - assert( i - sLoc.iZero - 1 >=0 ); - pgno = sLoc.aPgno[i-sLoc.iZero-1]; - iDbOff = (i64)(pgno-1) * szPage; - - if( iDbOff+szPage<=szDb ){ - iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE; - rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff); - - if( rc==SQLITE_OK ){ - rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff); - } - - if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){ - break; - } - } - - pInfo->nBackfillAttempted = i-1; - } + void *pBuf1 = sqlite3_malloc(pWal->szPage); + void *pBuf2 = sqlite3_malloc(pWal->szPage); + if( pBuf1==0 || pBuf2==0 ){ + rc = SQLITE_NOMEM; + }else{ + pWal->ckptLock = 1; + SEH_TRY { + rc = walSnapshotRecover(pWal, pBuf1, pBuf2); } - - sqlite3_free(pBuf1); - sqlite3_free(pBuf2); + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + pWal->ckptLock = 0; } + + sqlite3_free(pBuf1); + sqlite3_free(pBuf2); walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1); } @@ -64951,28 +68233,20 @@ SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){ #endif /* SQLITE_ENABLE_SNAPSHOT */ /* -** Begin a read transaction on the database. -** -** This routine used to be called sqlite3OpenSnapshot() and with good reason: -** it takes a snapshot of the state of the WAL and wal-index for the current -** instant in time. The current thread will continue to use this snapshot. -** Other threads might append new content to the WAL and wal-index but -** that extra content is ignored by the current thread. -** -** If the database contents have changes since the previous read -** transaction, then *pChanged is set to 1 before returning. The -** Pager layer will use this to know that its cache is stale and -** needs to be flushed. +** This function does the work of sqlite3WalBeginReadTransaction() (see +** below). That function simply calls this one inside an SEH_TRY{...} block. 
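+**
+** A rough sketch of the mechanism (the exact macro bodies depend on the
+** build): in SQLITE_USE_SEH builds, SEH_TRY/SEH_EXCEPT expand to Windows
+** structured exception handling, conceptually
+**
+**   SEH_TRY { body }       =>   __try { body }
+**   SEH_EXCEPT( rc = X; )  =>   } __except( filter ){ rc = X; }
+**
+** so a fault raised while reading a *-shm mapping that has become
+** invalid transfers control to the handler instead of terminating the
+** process. In builds without SQLITE_USE_SEH, both macros reduce to
+** no-ops apart from the debug-only nSehTry counter checked by the
+** assert() below.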
*/ -SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ +static int walBeginReadTransaction(Wal *pWal, int *pChanged){ int rc; /* Return code */ int cnt = 0; /* Number of TryBeginRead attempts */ #ifdef SQLITE_ENABLE_SNAPSHOT + int ckptLock = 0; int bChanged = 0; WalIndexHdr *pSnapshot = pWal->pSnapshot; #endif assert( pWal->ckptLock==0 ); + assert( pWal->nSehTry>0 ); #ifdef SQLITE_ENABLE_SNAPSHOT if( pSnapshot ){ @@ -64995,12 +68269,12 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ if( rc!=SQLITE_OK ){ return rc; } - pWal->ckptLock = 1; + ckptLock = 1; } #endif do{ - rc = walTryBeginRead(pWal, pChanged, 0, ++cnt); + rc = walTryBeginRead(pWal, pChanged, 0, &cnt); }while( rc==WAL_RETRY ); testcase( (rc&0xff)==SQLITE_BUSY ); testcase( (rc&0xff)==SQLITE_IOERR ); @@ -65059,15 +68333,37 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ } /* Release the shared CKPT lock obtained above. */ - if( pWal->ckptLock ){ + if( ckptLock ){ assert( pSnapshot ); walUnlockShared(pWal, WAL_CKPT_LOCK); - pWal->ckptLock = 0; } #endif return rc; } +/* +** Begin a read transaction on the database. +** +** This routine used to be called sqlite3OpenSnapshot() and with good reason: +** it takes a snapshot of the state of the WAL and wal-index for the current +** instant in time. The current thread will continue to use this snapshot. +** Other threads might append new content to the WAL and wal-index but +** that extra content is ignored by the current thread. +** +** If the database contents have changes since the previous read +** transaction, then *pChanged is set to 1 before returning. The +** Pager layer will use this to know that its cache is stale and +** needs to be flushed. +*/ +SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){ + int rc; + SEH_TRY { + rc = walBeginReadTransaction(pWal, pChanged); + } + SEH_EXCEPT( rc = walHandleException(pWal); ) + return rc; +} + /* ** Finish with a read transaction. All this does is release the ** read-lock. @@ -65088,7 +68384,7 @@ SQLITE_PRIVATE void sqlite3WalEndReadTransaction(Wal *pWal){ ** Return SQLITE_OK if successful, or an error code if an error occurs. If an ** error does occur, the final value of *piRead is undefined. */ -SQLITE_PRIVATE int sqlite3WalFindFrame( +static int walFindFrame( Wal *pWal, /* WAL handle */ Pgno pgno, /* Database page number to read data for */ u32 *piRead /* OUT: Frame number (or zero) */ @@ -65151,6 +68447,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( } nCollide = HASHTABLE_NSLOT; iKey = walHash(pgno); + SEH_INJECT_FAULT; while( (iH = AtomicLoad(&sLoc.aHash[iKey]))!=0 ){ u32 iFrame = iH + sLoc.iZero; if( iFrame<=iLast && iFrame>=pWal->minFrame && sLoc.aPgno[iH-1]==pgno ){ @@ -65158,6 +68455,7 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( iRead = iFrame; } if( (nCollide--)==0 ){ + *piRead = 0; return SQLITE_CORRUPT_BKPT; } iKey = walNextHash(iKey); @@ -65187,6 +68485,30 @@ SQLITE_PRIVATE int sqlite3WalFindFrame( return SQLITE_OK; } +/* +** Search the wal file for page pgno. If found, set *piRead to the frame that +** contains the page. Otherwise, if pgno is not in the wal file, set *piRead +** to zero. +** +** Return SQLITE_OK if successful, or an error code if an error occurs. If an +** error does occur, the final value of *piRead is undefined. +** +** The difference between this function and walFindFrame() is that this +** function wraps walFindFrame() in an SEH_TRY{...} block. 
+*/ +SQLITE_PRIVATE int sqlite3WalFindFrame( + Wal *pWal, /* WAL handle */ + Pgno pgno, /* Database page number to read data for */ + u32 *piRead /* OUT: Frame number (or zero) */ +){ + int rc; + SEH_TRY { + rc = walFindFrame(pWal, pgno, piRead); + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + return rc; +} + /* ** Read the contents of frame iRead from the wal file into buffer pOut ** (which is nOut bytes in size). Return SQLITE_OK if successful, or an @@ -65268,12 +68590,17 @@ SQLITE_PRIVATE int sqlite3WalBeginWriteTransaction(Wal *pWal){ ** time the read transaction on this connection was started, then ** the write is disallowed. */ - if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + SEH_TRY { + if( memcmp(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr))!=0 ){ + rc = SQLITE_BUSY_SNAPSHOT; + } + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) + + if( rc!=SQLITE_OK ){ walUnlockExclusive(pWal, WAL_WRITE_LOCK, 1); pWal->writeLock = 0; - rc = SQLITE_BUSY_SNAPSHOT; } - return rc; } @@ -65309,30 +68636,33 @@ SQLITE_PRIVATE int sqlite3WalUndo(Wal *pWal, int (*xUndo)(void *, Pgno), void *p Pgno iMax = pWal->hdr.mxFrame; Pgno iFrame; - /* Restore the clients cache of the wal-index header to the state it - ** was in before the client began writing to the database. - */ - memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); - - for(iFrame=pWal->hdr.mxFrame+1; - ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; - iFrame++ - ){ - /* This call cannot fail. Unless the page for which the page number - ** is passed as the second argument is (a) in the cache and - ** (b) has an outstanding reference, then xUndo is either a no-op - ** (if (a) is false) or simply expels the page from the cache (if (b) - ** is false). - ** - ** If the upper layer is doing a rollback, it is guaranteed that there - ** are no outstanding references to any page other than page 1. And - ** page 1 is never written to the log until the transaction is - ** committed. As a result, the call to xUndo may not fail. + SEH_TRY { + /* Restore the clients cache of the wal-index header to the state it + ** was in before the client began writing to the database. */ - assert( walFramePgno(pWal, iFrame)!=1 ); - rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + memcpy(&pWal->hdr, (void *)walIndexHdr(pWal), sizeof(WalIndexHdr)); + + for(iFrame=pWal->hdr.mxFrame+1; + ALWAYS(rc==SQLITE_OK) && iFrame<=iMax; + iFrame++ + ){ + /* This call cannot fail. Unless the page for which the page number + ** is passed as the second argument is (a) in the cache and + ** (b) has an outstanding reference, then xUndo is either a no-op + ** (if (a) is false) or simply expels the page from the cache (if (b) + ** is false). + ** + ** If the upper layer is doing a rollback, it is guaranteed that there + ** are no outstanding references to any page other than page 1. And + ** page 1 is never written to the log until the transaction is + ** committed. As a result, the call to xUndo may not fail. 
+ */ + assert( walFramePgno(pWal, iFrame)!=1 ); + rc = xUndo(pUndoCtx, walFramePgno(pWal, iFrame)); + } + if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); } - if( iMax!=pWal->hdr.mxFrame ) walCleanupHash(pWal); + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) } return rc; } @@ -65376,7 +68706,10 @@ SQLITE_PRIVATE int sqlite3WalSavepointUndo(Wal *pWal, u32 *aWalData){ pWal->hdr.mxFrame = aWalData[0]; pWal->hdr.aFrameCksum[0] = aWalData[1]; pWal->hdr.aFrameCksum[1] = aWalData[2]; - walCleanupHash(pWal); + SEH_TRY { + walCleanupHash(pWal); + } + SEH_EXCEPT( rc = SQLITE_IOERR_IN_PAGE; ) } return rc; @@ -65426,7 +68759,7 @@ static int walRestartLog(Wal *pWal){ cnt = 0; do{ int notUsed; - rc = walTryBeginRead(pWal, ¬Used, 1, ++cnt); + rc = walTryBeginRead(pWal, ¬Used, 1, &cnt); }while( rc==WAL_RETRY ); assert( (rc&0xff)!=SQLITE_BUSY ); /* BUSY not possible when useWal==1 */ testcase( (rc&0xff)==SQLITE_IOERR ); @@ -65561,7 +68894,7 @@ static int walRewriteChecksums(Wal *pWal, u32 iLast){ ** Write a set of frames to the log. The caller must hold the write-lock ** on the log file (obtained using sqlite3WalBeginWriteTransaction()). */ -SQLITE_PRIVATE int sqlite3WalFrames( +static int walFrames( Wal *pWal, /* Wal handle to write to */ int szPage, /* Database page-size in bytes */ PgHdr *pList, /* List of dirty pages to write */ @@ -65649,7 +68982,9 @@ SQLITE_PRIVATE int sqlite3WalFrames( if( rc ) return rc; } } - assert( (int)pWal->szPage==szPage ); + if( (int)pWal->szPage!=szPage ){ + return SQLITE_CORRUPT_BKPT; /* TH3 test case: cov1/corrupt155.test */ + } /* Setup information needed to write frames into the WAL */ w.pWal = pWal; @@ -65670,7 +69005,7 @@ SQLITE_PRIVATE int sqlite3WalFrames( ** checksums must be recomputed when the transaction is committed. */ if( iFirst && (p->pDirty || isCommit==0) ){ u32 iWrite = 0; - VVA_ONLY(rc =) sqlite3WalFindFrame(pWal, p->pgno, &iWrite); + VVA_ONLY(rc =) walFindFrame(pWal, p->pgno, &iWrite); assert( rc==SQLITE_OK || iWrite==0 ); if( iWrite>=iFirst ){ i64 iOff = walFrameOffset(iWrite, szPage) + WAL_FRAME_HDRSIZE; @@ -65793,6 +69128,29 @@ SQLITE_PRIVATE int sqlite3WalFrames( return rc; } +/* +** Write a set of frames to the log. The caller must hold the write-lock +** on the log file (obtained using sqlite3WalBeginWriteTransaction()). +** +** The difference between this function and walFrames() is that this +** function wraps walFrames() in an SEH_TRY{...} block. +*/ +SQLITE_PRIVATE int sqlite3WalFrames( + Wal *pWal, /* Wal handle to write to */ + int szPage, /* Database page-size in bytes */ + PgHdr *pList, /* List of dirty pages to write */ + Pgno nTruncate, /* Database size after this commit */ + int isCommit, /* True if this is a commit */ + int sync_flags /* Flags to pass to OsSync() (or 0) */ +){ + int rc; + SEH_TRY { + rc = walFrames(pWal, szPage, pList, nTruncate, isCommit, sync_flags); + } + SEH_EXCEPT( rc = walHandleException(pWal); ) + return rc; +} + /* ** This routine is called to implement sqlite3_wal_checkpoint() and ** related interfaces. @@ -65830,10 +69188,9 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( if( pWal->readOnly ) return SQLITE_READONLY; WALTRACE(("WAL%p: checkpoint begins\n", pWal)); - /* Enable blocking locks, if possible. If blocking locks are successfully - ** enabled, set xBusy2=0 so that the busy-handler is never invoked. */ + /* Enable blocking locks, if possible. 
*/ sqlite3WalDb(pWal, db); - (void)walEnableBlocking(pWal); + if( xBusy2 ) (void)walEnableBlocking(pWal); /* IMPLEMENTATION-OF: R-62028-47212 All calls obtain an exclusive ** "checkpoint" lock on the database file. @@ -65872,30 +69229,38 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint( /* Read the wal-index header. */ - if( rc==SQLITE_OK ){ - walDisableBlocking(pWal); - rc = walIndexReadHdr(pWal, &isChanged); - (void)walEnableBlocking(pWal); - if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ - sqlite3OsUnfetch(pWal->pDbFd, 0, 0); + SEH_TRY { + if( rc==SQLITE_OK ){ + /* For a passive checkpoint, do not re-enable blocking locks after + ** reading the wal-index header. A passive checkpoint should not block + ** or invoke the busy handler. The only lock such a checkpoint may + ** attempt to obtain is a lock on a read-slot, and it should give up + ** immediately and do a partial checkpoint if it cannot obtain it. */ + walDisableBlocking(pWal); + rc = walIndexReadHdr(pWal, &isChanged); + if( eMode2!=SQLITE_CHECKPOINT_PASSIVE ) (void)walEnableBlocking(pWal); + if( isChanged && pWal->pDbFd->pMethods->iVersion>=3 ){ + sqlite3OsUnfetch(pWal->pDbFd, 0, 0); + } } - } - /* Copy data from the log to the database file. */ - if( rc==SQLITE_OK ){ - - if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ - rc = SQLITE_CORRUPT_BKPT; - }else{ - rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf); - } + /* Copy data from the log to the database file. */ + if( rc==SQLITE_OK ){ + if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){ + rc = SQLITE_CORRUPT_BKPT; + }else{ + rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags,zBuf); + } - /* If no error occurred, set the output variables. */ - if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ - if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; - if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); + /* If no error occurred, set the output variables. */ + if( rc==SQLITE_OK || rc==SQLITE_BUSY ){ + if( pnLog ) *pnLog = (int)pWal->hdr.mxFrame; + SEH_INJECT_FAULT; + if( pnCkpt ) *pnCkpt = (int)(walCkptInfo(pWal)->nBackfill); + } } } + SEH_EXCEPT( rc = walHandleException(pWal); ) if( isChanged ){ /* If a new wal-index header was loaded before the checkpoint was @@ -65972,7 +69337,9 @@ SQLITE_PRIVATE int sqlite3WalExclusiveMode(Wal *pWal, int op){ ** locks are taken in this case). Nor should the pager attempt to ** upgrade to exclusive-mode following such an error. 
*/ +#ifndef SQLITE_USE_SEH assert( pWal->readLock>=0 || pWal->lockError ); +#endif assert( pWal->readLock>=0 || (op<=0 && pWal->exclusiveMode==0) ); if( op==0 ){ @@ -66073,16 +69440,19 @@ SQLITE_API int sqlite3_snapshot_cmp(sqlite3_snapshot *p1, sqlite3_snapshot *p2){ */ SQLITE_PRIVATE int sqlite3WalSnapshotCheck(Wal *pWal, sqlite3_snapshot *pSnapshot){ int rc; - rc = walLockShared(pWal, WAL_CKPT_LOCK); - if( rc==SQLITE_OK ){ - WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot; - if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) - || pNew->mxFramenBackfillAttempted - ){ - rc = SQLITE_ERROR_SNAPSHOT; - walUnlockShared(pWal, WAL_CKPT_LOCK); + SEH_TRY { + rc = walLockShared(pWal, WAL_CKPT_LOCK); + if( rc==SQLITE_OK ){ + WalIndexHdr *pNew = (WalIndexHdr*)pSnapshot; + if( memcmp(pNew->aSalt, pWal->hdr.aSalt, sizeof(pWal->hdr.aSalt)) + || pNew->mxFramenBackfillAttempted + ){ + rc = SQLITE_ERROR_SNAPSHOT; + walUnlockShared(pWal, WAL_CKPT_LOCK); + } } } + SEH_EXCEPT( rc = walHandleException(pWal); ) return rc; } @@ -66205,7 +69575,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){ ** 22 1 Min embedded payload fraction (must be 32) ** 23 1 Min leaf payload fraction (must be 32) ** 24 4 File change counter -** 28 4 Reserved for future use +** 28 4 The size of the database in pages ** 32 4 First freelist page ** 36 4 Number of freelist pages in the file ** 40 60 15 4-byte meta values passed to higher layers @@ -66313,7 +69683,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){ ** byte are used. The integer consists of all bytes that have bit 8 set and ** the first byte with bit 8 clear. The most significant byte of the integer ** appears first. A variable-length integer may not be more than 9 bytes long. -** As a special case, all 8 bytes of the 9th byte are used as data. This +** As a special case, all 8 bits of the 9th byte are used as data. This ** allows a 64-bit integer to be encoded in 9 bytes. ** ** 0x00 becomes 0x00000000 @@ -66321,7 +69691,7 @@ SQLITE_PRIVATE sqlite3_file *sqlite3WalFile(Wal *pWal){ ** 0x81 0x00 becomes 0x00000080 ** 0x82 0x00 becomes 0x00000100 ** 0x80 0x7f becomes 0x0000007f -** 0x8a 0x91 0xd1 0xac 0x78 becomes 0x12345678 +** 0x81 0x91 0xd1 0xac 0x78 becomes 0x12345678 ** 0x81 0x81 0x81 0x81 0x01 becomes 0x10204081 ** ** Variable length integers are used for rowids and to hold the number of @@ -66404,7 +69774,7 @@ typedef struct CellInfo CellInfo; ** page that has been loaded into memory. The information in this object ** is derived from the raw on-disk page content. ** -** As each database page is loaded into memory, the pager allocats an +** As each database page is loaded into memory, the pager allocates an ** instance of this object and zeros the first 8 bytes. (This is the ** "extra" information associated with each page of the pager.) ** @@ -66697,7 +70067,7 @@ struct BtCursor { #define BTCF_WriteFlag 0x01 /* True if a write cursor */ #define BTCF_ValidNKey 0x02 /* True if info.nKey is valid */ #define BTCF_ValidOvfl 0x04 /* True if aOverflow is valid */ -#define BTCF_AtLast 0x08 /* Cursor is pointing ot the last entry */ +#define BTCF_AtLast 0x08 /* Cursor is pointing to the last entry */ #define BTCF_Incrblob 0x10 /* True if an incremental I/O handle */ #define BTCF_Multiple 0x20 /* Maybe another cursor on the same btree */ #define BTCF_Pinned 0x40 /* Cursor is busy and cannot be moved */ @@ -66815,15 +70185,15 @@ struct BtCursor { ** So, this macro is defined instead. 
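**
** ISAUTOVACUUM takes the BtShared pointer as an explicit argument, so
** call sites are written ISAUTOVACUUM(pBt) rather than relying on a
** variable named pBt being in scope at the point of use.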
*/ #ifndef SQLITE_OMIT_AUTOVACUUM -#define ISAUTOVACUUM (pBt->autoVacuum) +#define ISAUTOVACUUM(pBt) (pBt->autoVacuum) #else -#define ISAUTOVACUUM 0 +#define ISAUTOVACUUM(pBt) 0 #endif /* -** This structure is passed around through all the sanity checking routines -** in order to keep track of some global state information. +** This structure is passed around through all the PRAGMA integrity_check +** checking routines in order to keep track of some global state information. ** ** The aRef[] array is allocated so that there is 1 bit for each page in ** the database. As the integrity-check proceeds, for each page used in @@ -66836,13 +70206,15 @@ struct IntegrityCk { BtShared *pBt; /* The tree being checked out */ Pager *pPager; /* The associated pager. Also accessible by pBt->pPager */ u8 *aPgRef; /* 1 bit per page in the db (see above) */ - Pgno nPage; /* Number of pages in the database */ + Pgno nCkPage; /* Pages in the database. 0 for partial check */ int mxErr; /* Stop accumulating errors when this reaches zero */ int nErr; /* Number of messages written to zErrMsg so far */ - int bOomFault; /* A memory allocation error has occurred */ + int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */ + u32 nStep; /* Number of steps into the integrity_check process */ const char *zPfx; /* Error message prefix */ - Pgno v1; /* Value for first %u substitution in zPfx */ - int v2; /* Value for second %d substitution in zPfx */ + Pgno v0; /* Value for first %u substitution in zPfx (root page) */ + Pgno v1; /* Value for second %u substitution in zPfx (current pg) */ + int v2; /* Value for third %d substitution in zPfx */ StrAccum errMsg; /* Accumulate the error message text here */ u32 *heap; /* Min-heap used for analyzing cell coverage */ sqlite3 *db; /* Database connection running the check */ @@ -66858,7 +70230,7 @@ struct IntegrityCk { /* ** get2byteAligned(), unlike get2byte(), requires that its argument point to a -** two-byte aligned address. get2bytea() is only used for accessing the +** two-byte aligned address. get2byteAligned() is only used for accessing the ** cell addresses in a btree header. */ #if SQLITE_BYTEORDER==4321 @@ -67035,7 +70407,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){ ** ** There is a corresponding leave-all procedures. ** -** Enter the mutexes in accending order by BtShared pointer address +** Enter the mutexes in ascending order by BtShared pointer address ** to avoid the possibility of deadlock when two threads with ** two or more btrees in common both try to lock all their btrees ** at the same instant. 
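/*
** A minimal standalone sketch (not part of sqlite3.c) of the
** deadlock-avoidance rule described above: a thread that needs more than
** one btree mutex always takes them in ascending order of object
** address, so two threads sharing a subset of btrees can never each hold
** a mutex the other is waiting for. The BtLite type and qsort()-based
** helpers are illustrative assumptions, not the real BtShared code.
*/
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

typedef struct BtLite { pthread_mutex_t mutex; } BtLite;

/* Order two btree pointers by address. */
static int btPtrCmp(const void *a, const void *b){
  uintptr_t u1 = (uintptr_t)*(BtLite *const *)a;
  uintptr_t u2 = (uintptr_t)*(BtLite *const *)b;
  return (u1>u2) - (u1<u2);
}

/* Lock every btree in apBt[0..n-1], lowest address first. */
static void btLockAll(BtLite **apBt, int n){
  int i;
  qsort(apBt, n, sizeof(apBt[0]), btPtrCmp);
  for(i=0; i<n; i++) pthread_mutex_lock(&apBt[i]->mutex);
}

/* Unlock in reverse order of acquisition. */
static void btUnlockAll(BtLite **apBt, int n){
  int i;
  for(i=n-1; i>=0; i--) pthread_mutex_unlock(&apBt[i]->mutex);
}

int main(void){
  BtLite a, b;
  BtLite *apBt[2];
  pthread_mutex_init(&a.mutex, 0);
  pthread_mutex_init(&b.mutex, 0);
  apBt[0] = &b; apBt[1] = &a;   /* any order in; address order is taken */
  btLockAll(apBt, 2);
  btUnlockAll(apBt, 2);
  return 0;
}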
@@ -67109,6 +70481,7 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsAllMutexes(sqlite3 *db){ SQLITE_PRIVATE int sqlite3SchemaMutexHeld(sqlite3 *db, int iDb, Schema *pSchema){ Btree *p; assert( db!=0 ); + if( db->pVfs==0 && db->nDb==0 ) return 1; if( pSchema ) iDb = sqlite3SchemaToIndex(db, pSchema); assert( iDb>=0 && iDbnDb ); if( !sqlite3_mutex_held(db->mutex) ) return 0; @@ -67304,8 +70677,8 @@ SQLITE_PRIVATE sqlite3_uint64 sqlite3BtreeSeekCount(Btree *pBt){ int corruptPageError(int lineno, MemPage *p){ char *zMsg; sqlite3BeginBenignMalloc(); - zMsg = sqlite3_mprintf("database corruption page %d of %s", - (int)p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) + zMsg = sqlite3_mprintf("database corruption page %u of %s", + p->pgno, sqlite3PagerFilename(p->pBt->pPager, 0) ); sqlite3EndBenignMalloc(); if( zMsg ){ @@ -68114,8 +71487,25 @@ SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor *pCur, int *pDifferentRow) */ SQLITE_PRIVATE void sqlite3BtreeCursorHint(BtCursor *pCur, int eHintType, ...){ /* Used only by system that substitute their own storage engine */ +#ifdef SQLITE_DEBUG + if( ALWAYS(eHintType==BTREE_HINT_RANGE) ){ + va_list ap; + Expr *pExpr; + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = sqlite3CursorRangeHintExprCheck; + va_start(ap, eHintType); + pExpr = va_arg(ap, Expr*); + w.u.aMem = va_arg(ap, Mem*); + va_end(ap); + assert( pExpr!=0 ); + assert( w.u.aMem!=0 ); + sqlite3WalkExpr(&w, pExpr); + } +#endif /* SQLITE_DEBUG */ } -#endif +#endif /* SQLITE_ENABLE_CURSOR_HINTS */ + /* ** Provide flag hints to the cursor. @@ -68200,7 +71590,7 @@ static void ptrmapPut(BtShared *pBt, Pgno key, u8 eType, Pgno parent, int *pRC){ pPtrmap = (u8 *)sqlite3PagerGetData(pDbPage); if( eType!=pPtrmap[offset] || get4byte(&pPtrmap[offset+1])!=parent ){ - TRACE(("PTRMAP_UPDATE: %d->(%d,%d)\n", key, eType, parent)); + TRACE(("PTRMAP_UPDATE: %u->(%u,%u)\n", key, eType, parent)); *pRC= rc = sqlite3PagerWrite(pDbPage); if( rc==SQLITE_OK ){ pPtrmap[offset] = eType; @@ -68399,27 +71789,31 @@ static void btreeParseCellPtr( iKey = *pIter; if( iKey>=0x80 ){ u8 x; - iKey = ((iKey&0x7f)<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x =*++pIter) & 0x7f); + iKey = (iKey<<7) ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x10204000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<7) | ((x = *++pIter) & 0x7f); + iKey = (iKey<<7) ^ 0x4000 ^ (x = *++pIter); if( x>=0x80 ){ - iKey = (iKey<<8) | (*++pIter); + iKey = (iKey<<8) ^ 0x8000 ^ (*++pIter); } } } } } + }else{ + iKey ^= 0x204000; } + }else{ + iKey ^= 0x4000; } } pIter++; @@ -68496,10 +71890,11 @@ static void btreeParseCell( ** ** cellSizePtrNoPayload() => table internal nodes ** cellSizePtrTableLeaf() => table leaf nodes -** cellSizePtr() => all index nodes & table leaf nodes +** cellSizePtr() => index internal nodes +** cellSizeIdxLeaf() => index leaf nodes */ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ - u8 *pIter = pCell + pPage->childPtrSize; /* For looping over bytes of pCell */ + u8 *pIter = pCell + 4; /* For looping over bytes of pCell */ u8 *pEnd; /* End mark for a varint */ u32 nSize; /* Size value to return */ 
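/*
** A standalone sketch (not part of sqlite3.c) of why the shift-and-XOR
** varint decode in btreeParseCellPtr() above is equivalent to the usual
** mask-and-OR form. Folding a byte in with (iKey<<7) ^ x keeps its low 7
** bits but also leaves the shifted continuation bit (0x80) in the
** result; those stray bits land at fixed positions (0x4000 after two
** bytes, 0x204000 after three, and so on), so a single constant XOR
** cancels them all at once. The two-byte case is verified exhaustively
** below.
*/
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Classic decode: mask off bit 7 of each byte, then OR. */
static uint32_t decode2_mask(uint8_t b0, uint8_t b1){
  return ((uint32_t)(b0 & 0x7f) << 7) | (uint32_t)(b1 & 0x7f);
}

/* Shift-and-XOR decode, in the style of the new btreeParseCellPtr(). */
static uint32_t decode2_xor(uint8_t b0, uint8_t b1){
  uint32_t v = ((uint32_t)b0 << 7) ^ b1;
  return v ^ 0x4000;   /* cancel b0's continuation bit, 0x80<<7 */
}

int main(void){
  unsigned b0, b1;
  for(b0=0x80; b0<=0xff; b0++){      /* first byte: continuation bit set */
    for(b1=0x00; b1<=0x7f; b1++){    /* second byte terminates the varint */
      assert( decode2_mask((uint8_t)b0,(uint8_t)b1)
           == decode2_xor((uint8_t)b0,(uint8_t)b1) );
    }
  }
  printf("XOR decode matches mask decode for all two-byte varints\n");
  return 0;
}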
@@ -68512,6 +71907,49 @@ static u16 cellSizePtr(MemPage *pPage, u8 *pCell){ pPage->xParseCell(pPage, pCell, &debuginfo); #endif + assert( pPage->childPtrSize==4 ); + nSize = *pIter; + if( nSize>=0x80 ){ + pEnd = &pIter[8]; + nSize &= 0x7f; + do{ + nSize = (nSize<<7) | (*++pIter & 0x7f); + }while( *(pIter)>=0x80 && pItermaxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize<=pPage->maxLocal ){ + nSize += (u32)(pIter - pCell); + assert( nSize>4 ); + }else{ + int minLocal = pPage->minLocal; + nSize = minLocal + (nSize - minLocal) % (pPage->pBt->usableSize - 4); + testcase( nSize==pPage->maxLocal ); + testcase( nSize==(u32)pPage->maxLocal+1 ); + if( nSize>pPage->maxLocal ){ + nSize = minLocal; + } + nSize += 4 + (u16)(pIter - pCell); + } + assert( nSize==debuginfo.nSize || CORRUPT_DB ); + return (u16)nSize; +} +static u16 cellSizePtrIdxLeaf(MemPage *pPage, u8 *pCell){ + u8 *pIter = pCell; /* For looping over bytes of pCell */ + u8 *pEnd; /* End mark for a varint */ + u32 nSize; /* Size value to return */ + +#ifdef SQLITE_DEBUG + /* The value returned by this function should always be the same as + ** the (CellInfo.nSize) value found by doing a full parse of the + ** cell. If SQLITE_DEBUG is defined, an assert() at the bottom of + ** this function verifies that this invariant is not violated. */ + CellInfo debuginfo; + pPage->xParseCell(pPage, pCell, &debuginfo); +#endif + + assert( pPage->childPtrSize==0 ); nSize = *pIter; if( nSize>=0x80 ){ pEnd = &pIter[8]; @@ -68636,7 +72074,7 @@ static void ptrmapPutOvflPtr(MemPage *pPage, MemPage *pSrc, u8 *pCell,int *pRC){ pPage->xParseCell(pPage, pCell, &info); if( info.nLocalaDataEnd, pCell, pCell+info.nLocal) ){ + if( SQLITE_OVERFLOW(pSrc->aDataEnd, pCell, pCell+info.nLocal) ){ testcase( pSrc!=pPage ); *pRC = SQLITE_CORRUPT_BKPT; return; @@ -68681,8 +72119,7 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ assert( pPage->pBt->usableSize <= SQLITE_MAX_PAGE_SIZE ); assert( pPage->nOverflow==0 ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - temp = 0; - src = data = pPage->aData; + data = pPage->aData; hdr = pPage->hdrOffset; cellOffset = pPage->cellOffset; nCell = pPage->nCell; @@ -68736,39 +72173,38 @@ static int defragmentPage(MemPage *pPage, int nMaxFrag){ cbrk = usableSize; iCellLast = usableSize - 4; iCellStart = get2byte(&data[hdr+5]); - for(i=0; iiCellLast ){ - return SQLITE_CORRUPT_PAGE(pPage); - } - assert( pc>=iCellStart && pc<=iCellLast ); - size = pPage->xCellSize(pPage, &src[pc]); - cbrk -= size; - if( cbrkusableSize ){ - return SQLITE_CORRUPT_PAGE(pPage); - } - assert( cbrk+size<=usableSize && cbrk>=iCellStart ); - testcase( cbrk+size==usableSize ); - testcase( pc+size==usableSize ); - put2byte(pAddr, cbrk); - if( temp==0 ){ - if( cbrk==pc ) continue; - temp = sqlite3PagerTempSpace(pPage->pBt->pPager); - memcpy(&temp[iCellStart], &data[iCellStart], usableSize - iCellStart); - src = temp; + if( nCell>0 ){ + temp = sqlite3PagerTempSpace(pPage->pBt->pPager); + memcpy(temp, data, usableSize); + src = temp; + for(i=0; iiCellLast ){ + return SQLITE_CORRUPT_PAGE(pPage); + } + assert( pc>=0 && pc<=iCellLast ); + size = pPage->xCellSize(pPage, &src[pc]); + cbrk -= size; + if( cbrkusableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); + } + assert( cbrk+size<=usableSize && cbrk>=iCellStart ); + testcase( cbrk+size==usableSize ); + testcase( pc+size==usableSize ); + put2byte(pAddr, cbrk); + memcpy(&data[cbrk], &src[pc], size); } - memcpy(&data[cbrk], &src[pc], size); } data[hdr+7] = 0; - defragment_out: +defragment_out: 
assert( pPage->nFree>=0 ); if( data[hdr+7]+cbrk-iCellFirst!=pPage->nFree ){ return SQLITE_CORRUPT_PAGE(pPage); @@ -68825,7 +72261,6 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ ** fragmented bytes within the page. */ memcpy(&aData[iAddr], &aData[pc], 2); aData[hdr+7] += (u8)x; - testcase( pc+x>maxPC ); return &aData[pc]; }else if( x+pc > maxPC ){ /* This slot extends off the end of the usable part of the page */ @@ -68841,9 +72276,9 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ iAddr = pc; pTmp = &aData[pc]; pc = get2byte(pTmp); - if( pc<=iAddr+size ){ + if( pc<=iAddr ){ if( pc ){ - /* The next slot in the chain is not past the end of the current slot */ + /* The next slot in the chain comes before the current slot */ *pRc = SQLITE_CORRUPT_PAGE(pPg); } return 0; @@ -68869,7 +72304,7 @@ static u8 *pageFindSlot(MemPage *pPg, int nByte, int *pRc){ ** allocation is being made in order to insert a new cell, so we will ** also end up needing a new cell pointer. */ -static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ +static SQLITE_INLINE int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ const int hdr = pPage->hdrOffset; /* Local cache of pPage->hdrOffset */ u8 * const data = pPage->aData; /* Local cache of pPage->aData */ int top; /* First byte of cell content area */ @@ -68895,13 +72330,14 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** integer, so a value of 0 is used in its place. */ pTmp = &data[hdr+5]; top = get2byte(pTmp); - assert( top<=(int)pPage->pBt->usableSize ); /* by btreeComputeFreeSpace() */ if( gap>top ){ if( top==0 && pPage->pBt->usableSize==65536 ){ top = 65536; }else{ return SQLITE_CORRUPT_PAGE(pPage); } + }else if( top>(int)pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); } /* If there is enough space between gap and top for one more cell pointer, @@ -68963,7 +72399,7 @@ static int allocateSpace(MemPage *pPage, int nByte, int *pIdx){ ** ** Even though the freeblock list was checked by btreeComputeFreeSpace(), ** that routine will not detect overlap between cells or freeblocks. Nor -** does it detect cells or freeblocks that encrouch into the reserved bytes +** does it detect cells or freeblocks that encroach into the reserved bytes ** at the end of the page. So do additional corruption checks inside this ** routine and return SQLITE_CORRUPT if any problems are found. */ @@ -68984,7 +72420,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ assert( CORRUPT_DB || iEnd <= pPage->pBt->usableSize ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( iSize>=4 ); /* Minimum cell size is 4 */ - assert( iStart<=pPage->pBt->usableSize-4 ); + assert( CORRUPT_DB || iStart<=pPage->pBt->usableSize-4 ); /* The list of freeblocks must be in ascending order. Find the ** spot on the list where iStart should be inserted. 
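/*
** A standalone sketch (not part of sqlite3.c) of the freeblock-list walk
** that freeSpace() performs below. Freeblocks on a page form a singly
** linked list sorted by offset: the 2-byte slot at hdr+1 holds the
** offset of the first freeblock, and the first two bytes of each block
** hold the offset of the next (0 terminates the list). The toy page
** layout and helper names here are assumptions for illustration.
*/
#include <stdint.h>
#include <stdio.h>

static unsigned get2(const uint8_t *p){ return ((unsigned)p[0]<<8) | p[1]; }
static void put2(uint8_t *p, unsigned v){ p[0]=(uint8_t)(v>>8); p[1]=(uint8_t)v; }

/*
** Return the offset of the 2-byte "next" field after which a new
** freeblock at offset iStart should be linked in (iPtrHdr is the offset
** of the header slot holding the head of the list).
*/
static unsigned findInsertPtr(uint8_t *aPage, unsigned iPtrHdr, unsigned iStart){
  unsigned iPtr = iPtrHdr;
  unsigned iFreeBlk;
  while( (iFreeBlk = get2(&aPage[iPtr]))!=0 && iFreeBlk<iStart ){
    iPtr = iFreeBlk;             /* step to the next block's "next" field */
  }
  return iPtr;
}

int main(void){
  uint8_t aPage[1024] = {0};
  put2(&aPage[1], 100);          /* head slot at hdr+1, with hdr==0 here */
  put2(&aPage[100], 500);        /* freeblock at 100 points to 500 */
  put2(&aPage[500], 0);          /* freeblock at 500 ends the list */
  printf("link new block 300 after the field at offset %u\n",
         findInsertPtr(aPage, 1, 300));   /* prints 100 */
  return 0;
}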
@@ -68995,7 +72431,7 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ iFreeBlk = 0; /* Shortcut for the case when the freelist is empty */ }else{ while( (iFreeBlk = get2byte(&data[iPtr]))pBt->btsFlags & BTS_FAST_SECURE ){ + /* Overwrite deleted information with zeros when the secure_delete + ** option is enabled */ + memset(&data[iStart], 0, iSize); + } if( iStart<=x ){ /* The new freeblock is at the beginning of the cell content area, ** so just extend the cell content area rather than create another @@ -69052,14 +72493,9 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ }else{ /* Insert the new freeblock into the freelist */ put2byte(&data[iPtr], iStart); + put2byte(&data[iStart], iFreeBlk); + put2byte(&data[iStart+2], iSize); } - if( pPage->pBt->btsFlags & BTS_FAST_SECURE ){ - /* Overwrite deleted information with zeros when the secure_delete - ** option is enabled */ - memset(&data[iStart], 0, iSize); - } - put2byte(&data[iStart], iFreeBlk); - put2byte(&data[iStart+2], iSize); pPage->nFree += iOrigSize; return SQLITE_OK; } @@ -69071,62 +72507,67 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){ ** Only the following combinations are supported. Anything different ** indicates a corrupt database files: ** -** PTF_ZERODATA -** PTF_ZERODATA | PTF_LEAF -** PTF_LEAFDATA | PTF_INTKEY -** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF +** PTF_ZERODATA (0x02, 2) +** PTF_LEAFDATA | PTF_INTKEY (0x05, 5) +** PTF_ZERODATA | PTF_LEAF (0x0a, 10) +** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF (0x0d, 13) */ static int decodeFlags(MemPage *pPage, int flagByte){ BtShared *pBt; /* A copy of pPage->pBt */ assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) ); assert( sqlite3_mutex_held(pPage->pBt->mutex) ); - pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 ); - flagByte &= ~PTF_LEAF; - pPage->childPtrSize = 4-4*pPage->leaf; pBt = pPage->pBt; - if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ - /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an - ** interior table b-tree page. */ - assert( (PTF_LEAFDATA|PTF_INTKEY)==5 ); - /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a - ** leaf table b-tree page. 
*/ - assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 ); - pPage->intKey = 1; - if( pPage->leaf ){ + pPage->max1bytePayload = pBt->max1bytePayload; + if( flagByte>=(PTF_ZERODATA | PTF_LEAF) ){ + pPage->childPtrSize = 0; + pPage->leaf = 1; + if( flagByte==(PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF) ){ pPage->intKeyLeaf = 1; pPage->xCellSize = cellSizePtrTableLeaf; pPage->xParseCell = btreeParseCellPtr; + pPage->intKey = 1; + pPage->maxLocal = pBt->maxLeaf; + pPage->minLocal = pBt->minLeaf; + }else if( flagByte==(PTF_ZERODATA | PTF_LEAF) ){ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtrIdxLeaf; + pPage->xParseCell = btreeParseCellPtrIndex; + pPage->maxLocal = pBt->maxLocal; + pPage->minLocal = pBt->minLocal; }else{ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtrIdxLeaf; + pPage->xParseCell = btreeParseCellPtrIndex; + return SQLITE_CORRUPT_PAGE(pPage); + } + }else{ + pPage->childPtrSize = 4; + pPage->leaf = 0; + if( flagByte==(PTF_ZERODATA) ){ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtr; + pPage->xParseCell = btreeParseCellPtrIndex; + pPage->maxLocal = pBt->maxLocal; + pPage->minLocal = pBt->minLocal; + }else if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){ pPage->intKeyLeaf = 0; pPage->xCellSize = cellSizePtrNoPayload; pPage->xParseCell = btreeParseCellPtrNoPayload; + pPage->intKey = 1; + pPage->maxLocal = pBt->maxLeaf; + pPage->minLocal = pBt->minLeaf; + }else{ + pPage->intKey = 0; + pPage->intKeyLeaf = 0; + pPage->xCellSize = cellSizePtr; + pPage->xParseCell = btreeParseCellPtrIndex; + return SQLITE_CORRUPT_PAGE(pPage); } - pPage->maxLocal = pBt->maxLeaf; - pPage->minLocal = pBt->minLeaf; - }else if( flagByte==PTF_ZERODATA ){ - /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an - ** interior index b-tree page. */ - assert( (PTF_ZERODATA)==2 ); - /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a - ** leaf index b-tree page. */ - assert( (PTF_ZERODATA|PTF_LEAF)==10 ); - pPage->intKey = 0; - pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; - pPage->xParseCell = btreeParseCellPtrIndex; - pPage->maxLocal = pBt->maxLocal; - pPage->minLocal = pBt->minLocal; - }else{ - /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is - ** an error. */ - pPage->intKey = 0; - pPage->intKeyLeaf = 0; - pPage->xCellSize = cellSizePtr; - pPage->xParseCell = btreeParseCellPtrIndex; - return SQLITE_CORRUPT_PAGE(pPage); } - pPage->max1bytePayload = pBt->max1bytePayload; return SQLITE_OK; } @@ -69417,70 +72858,41 @@ SQLITE_PRIVATE Pgno sqlite3BtreeLastPage(Btree *p){ /* ** Get a page from the pager and initialize it. -** -** If pCur!=0 then the page is being fetched as part of a moveToChild() -** call. Do additional sanity checking on the page in this case. -** And if the fetch fails, this routine must decrement pCur->iPage. -** -** The page is fetched as read-write unless pCur is not NULL and is -** a read-only cursor. -** -** If an error occurs, then *ppPage is undefined. It -** may remain unchanged, or it may be set to an invalid value. 
*/ static int getAndInitPage( BtShared *pBt, /* The database file */ Pgno pgno, /* Number of the page to get */ MemPage **ppPage, /* Write the page pointer here */ - BtCursor *pCur, /* Cursor to receive the page, or NULL */ int bReadOnly /* True for a read-only page */ ){ int rc; DbPage *pDbPage; + MemPage *pPage; assert( sqlite3_mutex_held(pBt->mutex) ); - assert( pCur==0 || ppPage==&pCur->pPage ); - assert( pCur==0 || bReadOnly==pCur->curPagerFlags ); - assert( pCur==0 || pCur->iPage>0 ); if( pgno>btreePagecount(pBt) ){ - rc = SQLITE_CORRUPT_BKPT; - goto getAndInitPage_error1; + *ppPage = 0; + return SQLITE_CORRUPT_BKPT; } rc = sqlite3PagerGet(pBt->pPager, pgno, (DbPage**)&pDbPage, bReadOnly); if( rc ){ - goto getAndInitPage_error1; + *ppPage = 0; + return rc; } - *ppPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); - if( (*ppPage)->isInit==0 ){ + pPage = (MemPage*)sqlite3PagerGetExtra(pDbPage); + if( pPage->isInit==0 ){ btreePageFromDbPage(pDbPage, pgno, pBt); - rc = btreeInitPage(*ppPage); + rc = btreeInitPage(pPage); if( rc!=SQLITE_OK ){ - goto getAndInitPage_error2; + releasePage(pPage); + *ppPage = 0; + return rc; } } - assert( (*ppPage)->pgno==pgno || CORRUPT_DB ); - assert( (*ppPage)->aData==sqlite3PagerGetData(pDbPage) ); - - /* If obtaining a child page for a cursor, we must verify that the page is - ** compatible with the root page. */ - if( pCur && ((*ppPage)->nCell<1 || (*ppPage)->intKey!=pCur->curIntKey) ){ - rc = SQLITE_CORRUPT_PGNO(pgno); - goto getAndInitPage_error2; - } + assert( pPage->pgno==pgno || CORRUPT_DB ); + assert( pPage->aData==sqlite3PagerGetData(pDbPage) ); + *ppPage = pPage; return SQLITE_OK; - -getAndInitPage_error2: - releasePage(*ppPage); -getAndInitPage_error1: - if( pCur ){ - pCur->iPage--; - pCur->pPage = pCur->apPage[pCur->iPage]; - } - testcase( pgno==0 ); - assert( pgno!=0 || rc==SQLITE_CORRUPT - || rc==SQLITE_IOERR_NOMEM - || rc==SQLITE_NOMEM ); - return rc; } /* @@ -69563,7 +72975,7 @@ static void pageReinit(DbPage *pData){ ** call to btreeInitPage() will likely return SQLITE_CORRUPT. ** But no harm is done by this. And it is very important that ** btreeInitPage() be called on every btree page so we make - ** the call for every page that comes in for re-initing. */ + ** the call for every page that comes in for re-initializing. */ btreeInitPage(pPage); } } @@ -69742,6 +73154,9 @@ SQLITE_PRIVATE int sqlite3BtreeOpen( assert( sizeof(u16)==2 ); assert( sizeof(Pgno)==4 ); + /* Suppress false-positive compiler warning from PVS-Studio */ + memset(&zDbHeader[16], 0, 8); + pBt = sqlite3MallocZero( sizeof(*pBt) ); if( pBt==0 ){ rc = SQLITE_NOMEM_BKPT; @@ -69958,7 +73373,7 @@ static SQLITE_NOINLINE int allocateTempSpace(BtShared *pBt){ ** can mean that fillInCell() only initializes the first 2 or 3 ** bytes of pTmpSpace, but that the first 4 bytes are copied from ** it into a database page. This is not actually a problem, but it - ** does cause a valgrind error when the 1 or 2 bytes of unitialized + ** does cause a valgrind error when the 1 or 2 bytes of uninitialized ** data is passed to system call write(). So to avoid this error, ** zero the first 4 bytes of temp space here. ** @@ -70193,7 +73608,7 @@ SQLITE_PRIVATE int sqlite3BtreeGetReserveNoMutex(Btree *p){ /* ** Return the number of bytes of space at the end of every page that -** are intentually left unused. This is the "reserved" space that is +** are intentionally left unused. This is the "reserved" space that is ** sometimes used by extensions. 
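**
** In this SQLCipher build, for example, the reserved region at the end
** of each page is where SQLCipher keeps its per-page IV and, in the
** default configuration, its per-page HMAC.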
** ** The value returned is the larger of the current reserve size and @@ -70440,7 +73855,6 @@ static int lockBtree(BtShared *pBt){ ){ goto page1_init_failed; } - pBt->btsFlags |= BTS_PAGESIZE_FIXED; assert( (pageSize & 7)==0 ); /* EVIDENCE-OF: R-59310-51205 The "reserved space" size in the 1-byte ** integer at offset 20 is the number of bytes of space at the end of @@ -70460,6 +73874,7 @@ static int lockBtree(BtShared *pBt){ releasePageOne(pPage1); pBt->usableSize = usableSize; pBt->pageSize = pageSize; + pBt->btsFlags |= BTS_PAGESIZE_FIXED; freeTempSpace(pBt); rc = sqlite3PagerSetPagesize(pBt->pPager, &pBt->pageSize, pageSize-usableSize); @@ -70479,6 +73894,7 @@ static int lockBtree(BtShared *pBt){ if( usableSize<480 ){ goto page1_init_failed; } + pBt->btsFlags |= BTS_PAGESIZE_FIXED; pBt->pageSize = pageSize; pBt->usableSize = usableSize; #ifndef SQLITE_OMIT_AUTOVACUUM @@ -70657,7 +74073,11 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p){ ** when A already has a read lock, we encourage A to give up and let B ** proceed. */ -SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ +static SQLITE_NOINLINE int btreeBeginTrans( + Btree *p, /* The btree in which to start the transaction */ + int wrflag, /* True to start a write transaction */ + int *pSchemaVersion /* Put schema version number here, if not NULL */ +){ BtShared *pBt = p->pBt; Pager *pPager = pBt->pPager; int rc = SQLITE_OK; @@ -70829,6 +74249,28 @@ SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVers sqlite3BtreeLeave(p); return rc; } +SQLITE_PRIVATE int sqlite3BtreeBeginTrans(Btree *p, int wrflag, int *pSchemaVersion){ + BtShared *pBt; + if( p->sharable + || p->inTrans==TRANS_NONE + || (p->inTrans==TRANS_READ && wrflag!=0) + ){ + return btreeBeginTrans(p,wrflag,pSchemaVersion); + } + pBt = p->pBt; + if( pSchemaVersion ){ + *pSchemaVersion = get4byte(&pBt->pPage1->aData[40]); + } + if( wrflag ){ + /* This call makes sure that the pager has the correct number of + ** open savepoints. If the second parameter is greater than 0 and + ** the sub-journal is not already open, then it will be opened here. + */ + return sqlite3PagerOpenSavepoint(pBt->pPager, p->db->nSavepoint); + }else{ + return SQLITE_OK; + } +} #ifndef SQLITE_OMIT_AUTOVACUUM @@ -70915,6 +74357,9 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){ } } }else{ + if( pCell+4 > pPage->aData+pPage->pBt->usableSize ){ + return SQLITE_CORRUPT_PAGE(pPage); + } if( get4byte(pCell)==iFrom ){ put4byte(pCell, iTo); break; @@ -70963,7 +74408,7 @@ static int relocatePage( if( iDbPage<3 ) return SQLITE_CORRUPT_BKPT; /* Move page iDbPage from its current location to page number iFreePage */ - TRACE(("AUTOVACUUM: Moving %d to free page %d (ptr page %d type %d)\n", + TRACE(("AUTOVACUUM: Moving %u to free page %u (ptr page %u type %u)\n", iDbPage, iFreePage, iPtrPage, eType)); rc = sqlite3PagerMovepage(pPager, pDbPage->pDbPage, iFreePage, isCommit); if( rc!=SQLITE_OK ){ @@ -71921,7 +75366,6 @@ SQLITE_PRIVATE void sqlite3BtreeCursorUnpin(BtCursor *pCur){ pCur->curFlags &= ~BTCF_Pinned; } -#ifdef SQLITE_ENABLE_OFFSET_SQL_FUNC /* ** Return the offset into the database file for the start of the ** payload to which the cursor is pointing. 
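/*
** A worked example (illustrative numbers only) of the computation that
** sqlite3BtreeOffset() performs below: pages are stored back to back, so
** page N begins at byte pageSize*(N-1) of the database file and the
** payload offset is the intra-page offset added to that base.
*/
#include <stdint.h>
#include <stdio.h>

int main(void){
  int64_t pageSize = 4096;   /* assumed page size */
  int64_t pgno     = 3;      /* page the cursor is parked on */
  int64_t intra    = 40;     /* payload offset within the page image */
  printf("%lld\n", (long long)(pageSize*(pgno-1) + intra));  /* 8232 */
  return 0;
}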
@@ -71933,7 +75377,6 @@ SQLITE_PRIVATE i64 sqlite3BtreeOffset(BtCursor *pCur){ return (i64)pCur->pBt->pageSize*((i64)pCur->pPage->pgno - 1) + (i64)(pCur->info.pPayload - pCur->pPage->aData); } -#endif /* SQLITE_ENABLE_OFFSET_SQL_FUNC */ /* ** Return the number of bytes of payload for the entry that pCur is @@ -71959,7 +75402,7 @@ SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor *pCur){ ** routine always returns 2147483647 (which is the largest record ** that SQLite can handle) or more. But returning a smaller value might ** prevent large memory allocations when trying to interpret a -** corrupt datrabase. +** corrupt database. ** ** The current implementation merely returns the size of the underlying ** database file. @@ -72259,7 +75702,6 @@ static int accessPayload( assert( aWrite>=pBufStart ); /* due to (6) */ memcpy(aSave, aWrite, 4); rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1)); - if( rc && nextPage>pBt->nPage ) rc = SQLITE_CORRUPT_BKPT; nextPage = get4byte(aWrite); memcpy(aWrite, aSave, 4); }else @@ -72421,8 +75863,7 @@ SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor *pCur, u32 *pAmt){ ** vice-versa). */ static int moveToChild(BtCursor *pCur, u32 newPgno){ - BtShared *pBt = pCur->pBt; - + int rc; assert( cursorOwnsBtShared(pCur) ); assert( pCur->eState==CURSOR_VALID ); assert( pCur->iPageapPage[pCur->iPage] = pCur->pPage; pCur->ix = 0; pCur->iPage++; - return getAndInitPage(pBt, newPgno, &pCur->pPage, pCur, pCur->curPagerFlags); + rc = getAndInitPage(pCur->pBt, newPgno, &pCur->pPage, pCur->curPagerFlags); + assert( pCur->pPage!=0 || rc!=SQLITE_OK ); + if( rc==SQLITE_OK + && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey) + ){ + releasePage(pCur->pPage); + rc = SQLITE_CORRUPT_PGNO(newPgno); + } + if( rc ){ + pCur->pPage = pCur->apPage[--pCur->iPage]; + } + return rc; } #ifdef SQLITE_DEBUG @@ -72542,8 +75994,8 @@ static int moveToRoot(BtCursor *pCur){ } sqlite3BtreeClearCursor(pCur); } - rc = getAndInitPage(pCur->pBtree->pBt, pCur->pgnoRoot, &pCur->pPage, - 0, pCur->curPagerFlags); + rc = getAndInitPage(pCur->pBt, pCur->pgnoRoot, &pCur->pPage, + pCur->curPagerFlags); if( rc!=SQLITE_OK ){ pCur->eState = CURSOR_INVALID; return rc; @@ -72655,7 +76107,7 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ *pRes = 0; rc = moveToLeftmost(pCur); }else if( rc==SQLITE_EMPTY ){ - assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); + assert( pCur->pgnoRoot==0 || (pCur->pPage!=0 && pCur->pPage->nCell==0) ); *pRes = 1; rc = SQLITE_OK; } @@ -72666,9 +76118,25 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){ ** on success. Set *pRes to 0 if the cursor actually points to something ** or set *pRes to 1 if the table is empty. 
*/ +static SQLITE_NOINLINE int btreeLast(BtCursor *pCur, int *pRes){ + int rc = moveToRoot(pCur); + if( rc==SQLITE_OK ){ + assert( pCur->eState==CURSOR_VALID ); + *pRes = 0; + rc = moveToRightmost(pCur); + if( rc==SQLITE_OK ){ + pCur->curFlags |= BTCF_AtLast; + }else{ + pCur->curFlags &= ~BTCF_AtLast; + } + }else if( rc==SQLITE_EMPTY ){ + assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); + *pRes = 1; + rc = SQLITE_OK; + } + return rc; +} SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ - int rc; - assert( cursorOwnsBtShared(pCur) ); assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) ); @@ -72689,23 +76157,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){ *pRes = 0; return SQLITE_OK; } - - rc = moveToRoot(pCur); - if( rc==SQLITE_OK ){ - assert( pCur->eState==CURSOR_VALID ); - *pRes = 0; - rc = moveToRightmost(pCur); - if( rc==SQLITE_OK ){ - pCur->curFlags |= BTCF_AtLast; - }else{ - pCur->curFlags &= ~BTCF_AtLast; - } - }else if( rc==SQLITE_EMPTY ){ - assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 ); - *pRes = 1; - rc = SQLITE_OK; - } - return rc; + return btreeLast(pCur, pRes); } /* Move the cursor so that it points to an entry in a table (a.k.a INTKEY) @@ -72760,7 +76212,7 @@ SQLITE_PRIVATE int sqlite3BtreeTableMoveto( /* If the requested key is one more than the previous key, then ** try to get there using sqlite3BtreeNext() rather than a full ** binary search. This is an optimization only. The correct answer - ** is still obtained without this case, only a little more slowely */ + ** is still obtained without this case, only a little more slowly. */ if( pCur->info.nKey+1==intKey ){ *pRes = 0; rc = sqlite3BtreeNext(pCur, 0); @@ -73156,10 +76608,36 @@ SQLITE_PRIVATE int sqlite3BtreeIndexMoveto( }else{ chldPg = get4byte(findCell(pPage, lwr)); } - pCur->ix = (u16)lwr; - rc = moveToChild(pCur, chldPg); - if( rc ) break; - } + + /* This block is similar to an in-lined version of: + ** + ** pCur->ix = (u16)lwr; + ** rc = moveToChild(pCur, chldPg); + ** if( rc ) break; + */ + pCur->info.nSize = 0; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); + if( pCur->iPage>=(BTCURSOR_MAX_DEPTH-1) ){ + return SQLITE_CORRUPT_BKPT; + } + pCur->aiIdx[pCur->iPage] = (u16)lwr; + pCur->apPage[pCur->iPage] = pCur->pPage; + pCur->ix = 0; + pCur->iPage++; + rc = getAndInitPage(pCur->pBt, chldPg, &pCur->pPage, pCur->curPagerFlags); + if( rc==SQLITE_OK + && (pCur->pPage->nCell<1 || pCur->pPage->intKey!=pCur->curIntKey) + ){ + releasePage(pCur->pPage); + rc = SQLITE_CORRUPT_PGNO(chldPg); + } + if( rc ){ + pCur->pPage = pCur->apPage[--pCur->iPage]; + break; + } + /* + ***** End of in-lined moveToChild() call */ + } moveto_index_finish: pCur->info.nSize = 0; assert( (pCur->curFlags & BTCF_ValidOvfl)==0 ); @@ -73250,14 +76728,8 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){ pPage = pCur->pPage; idx = ++pCur->ix; - if( !pPage->isInit || sqlite3FaultSim(412) ){ - /* The only known way for this to happen is for there to be a - ** recursive SQL function that does a DELETE operation as part of a - ** SELECT which deletes content out from under an active cursor - ** in a corrupt database file where the table being DELETE-ed from - ** has pages in common with the table being queried. See TH3 - ** module cov1/btree78.test testcase 220 (2018-06-08) for an - ** example. 
*/ + if( sqlite3FaultSim(412) ) pPage->isInit = 0; + if( !pPage->isInit ){ return SQLITE_CORRUPT_BKPT; } @@ -73349,7 +76821,10 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur){ } pPage = pCur->pPage; - assert( pPage->isInit ); + if( sqlite3FaultSim(412) ) pPage->isInit = 0; + if( !pPage->isInit ){ + return SQLITE_CORRUPT_BKPT; + } if( !pPage->leaf ){ int idx = pCur->ix; rc = moveToChild(pCur, get4byte(findCell(pPage, idx))); @@ -73433,8 +76908,8 @@ static int allocateBtreePage( assert( eMode==BTALLOC_ANY || (nearby>0 && IfNotOmitAV(pBt->autoVacuum)) ); pPage1 = pBt->pPage1; mxPage = btreePagecount(pBt); - /* EVIDENCE-OF: R-05119-02637 The 4-byte big-endian integer at offset 36 - ** stores stores the total number of pages on the freelist. */ + /* EVIDENCE-OF: R-21003-45125 The 4-byte big-endian integer at offset 36 + ** stores the total number of pages on the freelist. */ n = get4byte(&pPage1->aData[36]); testcase( n==mxPage-1 ); if( n>=mxPage ){ @@ -73520,7 +76995,7 @@ static int allocateBtreePage( memcpy(&pPage1->aData[32], &pTrunk->aData[0], 4); *ppPage = pTrunk; pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); }else if( k>(u32)(pBt->usableSize/4 - 2) ){ /* Value of k is out of range. Database corruption */ rc = SQLITE_CORRUPT_PGNO(iTrunk); @@ -73586,7 +77061,7 @@ static int allocateBtreePage( } } pTrunk = 0; - TRACE(("ALLOCATE: %d trunk - %d free pages left\n", *pPgno, n-1)); + TRACE(("ALLOCATE: %u trunk - %u free pages left\n", *pPgno, n-1)); #endif }else if( k>0 ){ /* Extract a leaf from the trunk */ @@ -73631,8 +77106,8 @@ static int allocateBtreePage( ){ int noContent; *pPgno = iPage; - TRACE(("ALLOCATE: %d was leaf %d of %d on trunk %d" - ": %d more free pages\n", + TRACE(("ALLOCATE: %u was leaf %u of %u on trunk %u" + ": %u more free pages\n", *pPgno, closest+1, k, pTrunk->pgno, n-1)); rc = sqlite3PagerWrite(pTrunk->pDbPage); if( rc ) goto end_allocate_page; @@ -73688,7 +77163,7 @@ static int allocateBtreePage( ** becomes a new pointer-map page, the second is used by the caller. */ MemPage *pPg = 0; - TRACE(("ALLOCATE: %d from end of file (pointer-map page)\n", pBt->nPage)); + TRACE(("ALLOCATE: %u from end of file (pointer-map page)\n", pBt->nPage)); assert( pBt->nPage!=PENDING_BYTE_PAGE(pBt) ); rc = btreeGetUnusedPage(pBt, pBt->nPage, &pPg, bNoContent); if( rc==SQLITE_OK ){ @@ -73711,7 +77186,7 @@ static int allocateBtreePage( releasePage(*ppPage); *ppPage = 0; } - TRACE(("ALLOCATE: %d from end of file\n", *pPgno)); + TRACE(("ALLOCATE: %u from end of file\n", *pPgno)); } assert( CORRUPT_DB || *pPgno!=PENDING_BYTE_PAGE(pBt) ); @@ -73779,7 +77254,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ /* If the database supports auto-vacuum, write an entry in the pointer-map ** to indicate that the page is free. 
*/ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc); if( rc ) goto freepage_out; } @@ -73839,7 +77314,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ } rc = btreeSetHasContent(pBt, iPage); } - TRACE(("FREE-PAGE: %d leaf on trunk page %d\n",pPage->pgno,pTrunk->pgno)); + TRACE(("FREE-PAGE: %u leaf on trunk page %u\n",pPage->pgno,pTrunk->pgno)); goto freepage_out; } } @@ -73860,7 +77335,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){ put4byte(pPage->aData, iTrunk); put4byte(&pPage->aData[4], 0); put4byte(&pPage1->aData[32], iPage); - TRACE(("FREE-PAGE: %d new trunk page replacing %d\n", pPage->pgno, iTrunk)); + TRACE(("FREE-PAGE: %u new trunk page replacing %u\n", pPage->pgno, iTrunk)); freepage_out: if( pPage ){ @@ -73949,7 +77424,7 @@ static SQLITE_NOINLINE int clearCellOverflow( /* Call xParseCell to compute the size of a cell. If the cell contains ** overflow, then invoke cellClearOverflow to clear out that overflow. -** STore the result code (SQLITE_OK or some error code) in rc. +** Store the result code (SQLITE_OK or some error code) in rc. ** ** Implemented as macro to force inlining for performance. */ @@ -74022,7 +77497,10 @@ static int fillInCell( n = nHeader + nPayload; testcase( n==3 ); testcase( n==4 ); - if( n<4 ) n = 4; + if( n<4 ){ + n = 4; + pPayload[nPayload] = 0; + } *pnSize = n; assert( nSrc<=nPayload ); testcase( nSrcpBt->usableSize > (u32)(ptr-data) ); pc = get2byte(ptr); hdr = pPage->hdrOffset; -#if 0 /* Not required. Omit for efficiency */ - if( pcnCell*2 ){ - *pRC = SQLITE_CORRUPT_BKPT; - return; - } -#endif testcase( pc==(u32)get2byte(&data[hdr+5]) ); testcase( pc+sz==pPage->pBt->usableSize ); if( pc+sz > pPage->pBt->usableSize ){ @@ -74226,23 +77698,27 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){ ** Allocating a new entry in pPage->aCell[] implies that ** pPage->nOverflow is incremented. ** -** *pRC must be SQLITE_OK when this routine is called. +** The insertCellFast() routine below works exactly the same as +** insertCell() except that it lacks the pTemp and iChild parameters +** which are assumed zero. Other than that, the two routines are the +** same. +** +** Fixes or enhancements to this routine should be reflected in +** insertCellFast()! 
*/ -static void insertCell( +static int insertCell( MemPage *pPage, /* Page into which we are copying */ int i, /* New cell becomes the i-th cell of the page */ u8 *pCell, /* Content of the new cell */ int sz, /* Bytes of content in pCell */ u8 *pTemp, /* Temp storage space for pCell, if needed */ - Pgno iChild, /* If non-zero, replace first 4 bytes with this value */ - int *pRC /* Read and write return code from here */ + Pgno iChild /* If non-zero, replace first 4 bytes with this value */ ){ int idx = 0; /* Where to write new cell content in data[] */ int j; /* Loop counter */ u8 *data; /* The content of the whole page */ u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ - assert( *pRC==SQLITE_OK ); assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); assert( MX_CELL(pPage->pBt)<=10921 ); assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); @@ -74251,14 +77727,103 @@ static void insertCell( assert( sqlite3_mutex_held(pPage->pBt->mutex) ); assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); assert( pPage->nFree>=0 ); + assert( iChild>0 ); if( pPage->nOverflow || sz+2>pPage->nFree ){ if( pTemp ){ memcpy(pTemp, pCell, sz); pCell = pTemp; } - if( iChild ){ - put4byte(pCell, iChild); + put4byte(pCell, iChild); + j = pPage->nOverflow++; + /* Comparison against ArraySize-1 since we hold back one extra slot + ** as a contingency. In other words, never need more than 3 overflow + ** slots but 4 are allocated, just to be safe. */ + assert( j < ArraySize(pPage->apOvfl)-1 ); + pPage->apOvfl[j] = pCell; + pPage->aiOvfl[j] = (u16)i; + + /* When multiple overflows occur, they are always sequential and in + ** sorted order. This invariants arise because multiple overflows can + ** only occur when inserting divider cells into the parent page during + ** balancing, and the dividers are adjacent and sorted. + */ + assert( j==0 || pPage->aiOvfl[j-1]<(u16)i ); /* Overflows in sorted order */ + assert( j==0 || i==pPage->aiOvfl[j-1]+1 ); /* Overflows are sequential */ + }else{ + int rc = sqlite3PagerWrite(pPage->pDbPage); + if( NEVER(rc!=SQLITE_OK) ){ + return rc; + } + assert( sqlite3PagerIswriteable(pPage->pDbPage) ); + data = pPage->aData; + assert( &data[pPage->cellOffset]==pPage->aCellIdx ); + rc = allocateSpace(pPage, sz, &idx); + if( rc ){ return rc; } + /* The allocateSpace() routine guarantees the following properties + ** if it returns successfully */ + assert( idx >= 0 ); + assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); + assert( idx+sz <= (int)pPage->pBt->usableSize ); + pPage->nFree -= (u16)(2 + sz); + /* In a corrupt database where an entry in the cell index section of + ** a btree page has a value of 3 or less, the pCell value might point + ** as many as 4 bytes in front of the start of the aData buffer for + ** the source page. Make sure this does not cause problems by not + ** reading the first 4 bytes */ + memcpy(&data[idx+4], pCell+4, sz-4); + put4byte(&data[idx], iChild); + pIns = pPage->aCellIdx + i*2; + memmove(pIns+2, pIns, 2*(pPage->nCell - i)); + put2byte(pIns, idx); + pPage->nCell++; + /* increment the cell count */ + if( (++data[pPage->hdrOffset+4])==0 ) data[pPage->hdrOffset+3]++; + assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); +#ifndef SQLITE_OMIT_AUTOVACUUM + if( pPage->pBt->autoVacuum ){ + int rc2 = SQLITE_OK; + /* The cell may contain a pointer to an overflow page. If so, write + ** the entry for the overflow page into the pointer map. 
+ */ + ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2); + if( rc2 ) return rc2; } +#endif + } + return SQLITE_OK; +} + +/* +** This variant of insertCell() assumes that the pTemp and iChild +** parameters are both zero. Use this variant in sqlite3BtreeInsert() +** for performance improvement, and also so that this variant is only +** called from that one place, and is thus inlined, and thus runs must +** faster. +** +** Fixes or enhancements to this routine should be reflected into +** the insertCell() routine. +*/ +static int insertCellFast( + MemPage *pPage, /* Page into which we are copying */ + int i, /* New cell becomes the i-th cell of the page */ + u8 *pCell, /* Content of the new cell */ + int sz /* Bytes of content in pCell */ +){ + int idx = 0; /* Where to write new cell content in data[] */ + int j; /* Loop counter */ + u8 *data; /* The content of the whole page */ + u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */ + + assert( i>=0 && i<=pPage->nCell+pPage->nOverflow ); + assert( MX_CELL(pPage->pBt)<=10921 ); + assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB ); + assert( pPage->nOverflow<=ArraySize(pPage->apOvfl) ); + assert( ArraySize(pPage->apOvfl)==ArraySize(pPage->aiOvfl) ); + assert( sqlite3_mutex_held(pPage->pBt->mutex) ); + assert( sz==pPage->xCellSize(pPage, pCell) || CORRUPT_DB ); + assert( pPage->nFree>=0 ); + assert( pPage->nOverflow==0 ); + if( sz+2>pPage->nFree ){ j = pPage->nOverflow++; /* Comparison against ArraySize-1 since we hold back one extra slot ** as a contingency. In other words, never need more than 3 overflow @@ -74277,31 +77842,20 @@ static void insertCell( }else{ int rc = sqlite3PagerWrite(pPage->pDbPage); if( rc!=SQLITE_OK ){ - *pRC = rc; - return; + return rc; } assert( sqlite3PagerIswriteable(pPage->pDbPage) ); data = pPage->aData; assert( &data[pPage->cellOffset]==pPage->aCellIdx ); rc = allocateSpace(pPage, sz, &idx); - if( rc ){ *pRC = rc; return; } + if( rc ){ return rc; } /* The allocateSpace() routine guarantees the following properties ** if it returns successfully */ assert( idx >= 0 ); assert( idx >= pPage->cellOffset+2*pPage->nCell+2 || CORRUPT_DB ); assert( idx+sz <= (int)pPage->pBt->usableSize ); pPage->nFree -= (u16)(2 + sz); - if( iChild ){ - /* In a corrupt database where an entry in the cell index section of - ** a btree page has a value of 3 or less, the pCell value might point - ** as many as 4 bytes in front of the start of the aData buffer for - ** the source page. Make sure this does not cause problems by not - ** reading the first 4 bytes */ - memcpy(&data[idx+4], pCell+4, sz-4); - put4byte(&data[idx], iChild); - }else{ - memcpy(&data[idx], pCell, sz); - } + memcpy(&data[idx], pCell, sz); pIns = pPage->aCellIdx + i*2; memmove(pIns+2, pIns, 2*(pPage->nCell - i)); put2byte(pIns, idx); @@ -74311,13 +77865,16 @@ static void insertCell( assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB ); #ifndef SQLITE_OMIT_AUTOVACUUM if( pPage->pBt->autoVacuum ){ + int rc2 = SQLITE_OK; /* The cell may contain a pointer to an overflow page. If so, write ** the entry for the overflow page into the pointer map. */ - ptrmapPutOvflPtr(pPage, pPage, pCell, pRC); + ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2); + if( rc2 ) return rc2; } #endif } + return SQLITE_OK; } /* @@ -74418,14 +77975,16 @@ struct CellArray { ** computed. 
*/ static void populateCellCache(CellArray *p, int idx, int N){ + MemPage *pRef = p->pRef; + u16 *szCell = p->szCell; assert( idx>=0 && idx+N<=p->nCell ); while( N>0 ){ assert( p->apCell[idx]!=0 ); - if( p->szCell[idx]==0 ){ - p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]); + if( szCell[idx]==0 ){ + szCell[idx] = pRef->xCellSize(pRef, p->apCell[idx]); }else{ assert( CORRUPT_DB || - p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) ); + szCell[idx]==pRef->xCellSize(pRef, p->apCell[idx]) ); } idx++; N--; @@ -74479,12 +78038,13 @@ static int rebuildPage( int k; /* Current slot in pCArray->apEnd[] */ u8 *pSrcEnd; /* Current pCArray->apEnd[k] value */ + assert( nCell>0 ); assert( i(u32)usableSize ){ j = 0; } memcpy(&pTmp[j], &aData[j], usableSize - j); - for(k=0; pCArray->ixNx[k]<=i && ALWAYS(kixNx[k]<=i; k++){} pSrcEnd = pCArray->apEnd[k]; pData = pEnd; @@ -74547,7 +78107,7 @@ static int rebuildPage( ** Finally, argument pBegin points to the byte immediately following the ** end of the space required by this page for the cell-pointer area (for ** all cells - not just those inserted by the current call). If the content -** area must be extended to before this point in order to accomodate all +** area must be extended to before this point in order to accommodate all ** cells in apCell[], then the cells do not fit and non-zero is returned. */ static int pageInsertArray( @@ -74567,7 +78127,7 @@ static int pageInsertArray( u8 *pEnd; /* Maximum extent of cell data */ assert( CORRUPT_DB || pPg->hdrOffset==0 ); /* Never called on page 1 */ if( iEnd<=iFirst ) return 0; - for(k=0; pCArray->ixNx[k]<=i && ALWAYS(kixNx[k]<=i ; k++){} pEnd = pCArray->apEnd[k]; while( 1 /*Exit by break*/ ){ int sz, rc; @@ -74625,39 +78185,50 @@ static int pageFreeArray( u8 * const pEnd = &aData[pPg->pBt->usableSize]; u8 * const pStart = &aData[pPg->hdrOffset + 8 + pPg->childPtrSize]; int nRet = 0; - int i; + int i, j; int iEnd = iFirst + nCell; - u8 *pFree = 0; - int szFree = 0; + int nFree = 0; + int aOfst[10]; + int aAfter[10]; for(i=iFirst; iapCell[i]; if( SQLITE_WITHIN(pCell, pStart, pEnd) ){ int sz; + int iAfter; + int iOfst; /* No need to use cachedCellSize() here. The sizes of all cells that ** are to be freed have already been computing while deciding which ** cells need freeing */ sz = pCArray->szCell[i]; assert( sz>0 ); - if( pFree!=(pCell + sz) ){ - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); - } - pFree = pCell; - szFree = sz; - if( pFree+sz>pEnd ){ - return 0; + iOfst = (u16)(pCell - aData); + iAfter = iOfst+sz; + for(j=0; j=nFree ){ + if( nFree>=(int)(sizeof(aOfst)/sizeof(aOfst[0])) ){ + for(j=0; jpEnd ) return 0; + nFree++; } nRet++; } } - if( pFree ){ - assert( pFree>aData && (pFree - aData)<65536 ); - freeSpace(pPg, (u16)(pFree - aData), szFree); + for(j=0; jpPg->aDataEnd ) goto editpage_fail; + if( NEVER(pData>pPg->aDataEnd) ) goto editpage_fail; /* Add cells to the start of the page */ if( iNewpgno, &rc); if( szCell>pNew->minLocal ){ ptrmapPutOvflPtr(pNew, pNew, pCell, &rc); @@ -74884,8 +78456,8 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){ /* Insert the new divider cell into pParent. */ if( rc==SQLITE_OK ){ - insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), - 0, pPage->pgno, &rc); + rc = insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace), + 0, pPage->pgno); } /* Set the right-child pointer of pParent to point to the new page. 
*/ @@ -74994,7 +78566,7 @@ static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){ /* If this is an auto-vacuum database, update the pointer-map entries ** for any b-tree or overflow pages that pTo now contains the pointers to. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ *pRC = setChildPtrmaps(pTo); } } @@ -75127,7 +78699,7 @@ static int balance_nonroot( pgno = get4byte(pRight); while( 1 ){ if( rc==SQLITE_OK ){ - rc = getAndInitPage(pBt, pgno, &apOld[i], 0, 0); + rc = getAndInitPage(pBt, pgno, &apOld[i], 0); } if( rc ){ memset(apOld, 0, (i+1)*sizeof(MemPage*)); @@ -75418,15 +78990,17 @@ static int balance_nonroot( d = r + 1 - leafData; (void)cachedCellSize(&b, d); do{ + int szR, szD; assert( d szLeft-(b.szCell[r]+(i==k-1?0:2)))){ + && (bBulk || szRight+szD+2 > szLeft-(szR+(i==k-1?0:2)))){ break; } - szRight += b.szCell[d] + 2; - szLeft -= b.szCell[r] + 2; + szRight += szD + 2; + szLeft -= szR + 2; cntNew[i-1] = r; r--; d--; @@ -75439,7 +79013,7 @@ static int balance_nonroot( } } - /* Sanity check: For a non-corrupt database file one of the follwing + /* Sanity check: For a non-corrupt database file one of the following ** must be true: ** (1) We found one or more cells (cntNew[0])>0), or ** (2) pPage is a virtual root page. A virtual root page is when @@ -75447,7 +79021,7 @@ static int balance_nonroot( ** that page. */ assert( cntNew[0]>0 || (pParent->pgno==1 && pParent->nCell==0) || CORRUPT_DB); - TRACE(("BALANCE: old: %d(nc=%d) %d(nc=%d) %d(nc=%d)\n", + TRACE(("BALANCE: old: %u(nc=%u) %u(nc=%u) %u(nc=%u)\n", apOld[0]->pgno, apOld[0]->nCell, nOld>=2 ? apOld[1]->pgno : 0, nOld>=2 ? apOld[1]->nCell : 0, nOld>=3 ? apOld[2]->pgno : 0, nOld>=3 ? apOld[2]->nCell : 0 @@ -75480,7 +79054,7 @@ static int balance_nonroot( cntOld[i] = b.nCell; /* Set the pointer-map entry for the new sibling page. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc); if( rc!=SQLITE_OK ){ goto balance_cleanup; @@ -75531,8 +79105,8 @@ static int balance_nonroot( } } - TRACE(("BALANCE: new: %d(%d nc=%d) %d(%d nc=%d) %d(%d nc=%d) " - "%d(%d nc=%d) %d(%d nc=%d)\n", + TRACE(("BALANCE: new: %u(%u nc=%u) %u(%u nc=%u) %u(%u nc=%u) " + "%u(%u nc=%u) %u(%u nc=%u)\n", apNew[0]->pgno, szNew[0], cntNew[0], nNew>=2 ? apNew[1]->pgno : 0, nNew>=2 ? szNew[1] : 0, nNew>=2 ? cntNew[1] - cntNew[0] - !leafData : 0, @@ -75573,7 +79147,7 @@ static int balance_nonroot( ** updated. This happens below, after the sibling pages have been ** populated, not here. */ - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ MemPage *pOld; MemPage *pNew = pOld = apNew[0]; int cntOldNext = pNew->nCell + pNew->nOverflow; @@ -75664,13 +79238,13 @@ static int balance_nonroot( iOvflSpace += sz; assert( sz<=pBt->maxLocal+23 ); assert( iOvflSpace <= (int)pBt->pageSize ); - for(k=0; b.ixNx[k]<=j && ALWAYS(kpgno, &rc); + rc = insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno); if( rc!=SQLITE_OK ) goto balance_cleanup; assert( sqlite3PagerIswriteable(pParent->pDbPage) ); } @@ -75700,6 +79274,8 @@ static int balance_nonroot( for(i=1-nNew; i=0 && iPg=1 || i>=0 ); + assert( iPg=0 /* On the upwards pass, or... */ || cntOld[iPg-1]>=cntNew[iPg-1] /* Condition (1) is true */ @@ -75766,7 +79342,7 @@ static int balance_nonroot( ); copyNodeContent(apNew[0], pParent, &rc); freePage(apNew[0], &rc); - }else if( ISAUTOVACUUM && !leafCorrection ){ + }else if( ISAUTOVACUUM(pBt) && !leafCorrection ){ /* Fix the pointer map entries associated with the right-child of each ** sibling page. 
All other pointer map entries have already been taken ** care of. */ @@ -75777,7 +79353,7 @@ static int balance_nonroot( } assert( pParent->isInit ); - TRACE(("BALANCE: finished: old=%d new=%d cells=%d\n", + TRACE(("BALANCE: finished: old=%u new=%u cells=%u\n", nOld, nNew, b.nCell)); /* Free any old pages that were not reused as new pages. @@ -75787,7 +79363,7 @@ static int balance_nonroot( } #if 0 - if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){ + if( ISAUTOVACUUM(pBt) && rc==SQLITE_OK && apNew[0]->isInit ){ /* The ptrmapCheckPages() contains assert() statements that verify that ** all pointer map pages are set correctly. This is helpful while ** debugging. This is usually disabled because a corrupt database may @@ -75849,7 +79425,7 @@ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ if( rc==SQLITE_OK ){ rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0); copyNodeContent(pRoot, pChild, &rc); - if( ISAUTOVACUUM ){ + if( ISAUTOVACUUM(pBt) ){ ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc); } } @@ -75862,7 +79438,7 @@ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){ assert( sqlite3PagerIswriteable(pRoot->pDbPage) ); assert( pChild->nCell==pRoot->nCell || CORRUPT_DB ); - TRACE(("BALANCE: copy root %d into %d\n", pRoot->pgno, pChild->pgno)); + TRACE(("BALANCE: copy root %u into %u\n", pRoot->pgno, pChild->pgno)); /* Copy the overflow cells from pRoot to pChild */ memcpy(pChild->aiOvfl, pRoot->aiOvfl, @@ -75953,6 +79529,11 @@ static int balance(BtCursor *pCur){ }else{ break; } + }else if( sqlite3PagerPageRefcount(pPage->pDbPage)>1 ){ + /* The page being written is not a root page, and there is currently + ** more than one reference to it. This only happens if the page is one + ** of its own ancestor pages. Corruption. */ + rc = SQLITE_CORRUPT_BKPT; }else{ MemPage * const pParent = pCur->apPage[iPage-1]; int const iIdx = pCur->aiIdx[iPage-1]; @@ -76051,7 +79632,7 @@ static int btreeOverwriteContent( ){ int nData = pX->nData - iOffset; if( nData<=0 ){ - /* Overwritting with zeros */ + /* Overwriting with zeros */ int i; for(i=0; ipData to write */ int nTotal = pX->nData + pX->nZero; /* Total bytes of to write */ int rc; /* Return code */ @@ -76094,16 +79679,12 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ Pgno ovflPgno; /* Next overflow page to write */ u32 ovflPageSize; /* Size to write on overflow page */ - if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd - || pCur->info.pPayload < pPage->aData + pPage->cellOffset - ){ - return SQLITE_CORRUPT_BKPT; - } + assert( pCur->info.nLocalinfo.pPayload, pX, 0, pCur->info.nLocal); if( rc ) return rc; - if( pCur->info.nLocal==nTotal ) return SQLITE_OK; /* Now overwrite the overflow pages */ iOffset = pCur->info.nLocal; @@ -76133,6 +79714,29 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ return SQLITE_OK; } +/* +** Overwrite the cell that cursor pCur is pointing to with fresh content +** contained in pX. 
+*/ +static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){ + int nTotal = pX->nData + pX->nZero; /* Total bytes of to write */ + MemPage *pPage = pCur->pPage; /* Page being written */ + + if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd + || pCur->info.pPayload < pPage->aData + pPage->cellOffset + ){ + return SQLITE_CORRUPT_BKPT; + } + if( pCur->info.nLocal==nTotal ){ + /* The entire cell is local */ + return btreeOverwriteContent(pPage, pCur->info.pPayload, pX, + 0, pCur->info.nLocal); + }else{ + /* The cell contains overflow content */ + return btreeOverwriteOverflowCell(pCur, pX); + } +} + /* ** Insert a new record into the BTree. The content of the new record @@ -76176,7 +79780,6 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( int idx; MemPage *pPage; Btree *p = pCur->pBtree; - BtShared *pBt = p->pBt; unsigned char *oldCell; unsigned char *newCell = 0; @@ -76195,7 +79798,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** not to clear the cursor here. */ if( pCur->curFlags & BTCF_Multiple ){ - rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur); + rc = saveAllCursors(p->pBt, pCur->pgnoRoot, pCur); if( rc ) return rc; if( loc && pCur->iPage<0 ){ /* This can only happen if the schema is corrupt such that there is more @@ -76219,8 +79822,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( assert( cursorOwnsBtShared(pCur) ); assert( (pCur->curFlags & BTCF_WriteFlag)!=0 - && pBt->inTransaction==TRANS_WRITE - && (pBt->btsFlags & BTS_READ_ONLY)==0 ); + && p->pBt->inTransaction==TRANS_WRITE + && (p->pBt->btsFlags & BTS_READ_ONLY)==0 ); assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) ); /* Assert that the caller has been consistent. If this cursor was opened @@ -76318,7 +79921,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( } } assert( pCur->eState==CURSOR_VALID - || (pCur->eState==CURSOR_INVALID && loc) ); + || (pCur->eState==CURSOR_INVALID && loc) || CORRUPT_DB ); pPage = pCur->pPage; assert( pPage->intKey || pX->nKey>=0 || (flags & BTREE_PREFORMAT) ); @@ -76333,31 +79936,37 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( if( rc ) return rc; } - TRACE(("INSERT: table=%d nkey=%lld ndata=%d page=%d %s\n", + TRACE(("INSERT: table=%u nkey=%lld ndata=%u page=%u %s\n", pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno, loc==0 ? 
"overwrite" : "new entry")); assert( pPage->isInit || CORRUPT_DB ); - newCell = pBt->pTmpSpace; + newCell = p->pBt->pTmpSpace; assert( newCell!=0 ); + assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT ); if( flags & BTREE_PREFORMAT ){ rc = SQLITE_OK; - szNew = pBt->nPreformatSize; - if( szNew<4 ) szNew = 4; - if( ISAUTOVACUUM && szNew>pPage->maxLocal ){ + szNew = p->pBt->nPreformatSize; + if( szNew<4 ){ + szNew = 4; + newCell[3] = 0; + } + if( ISAUTOVACUUM(p->pBt) && szNew>pPage->maxLocal ){ CellInfo info; pPage->xParseCell(pPage, newCell, &info); if( info.nPayload!=info.nLocal ){ Pgno ovfl = get4byte(&newCell[szNew-4]); - ptrmapPut(pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + ptrmapPut(p->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc); + if( NEVER(rc) ) goto end_insert; } } }else{ rc = fillInCell(pPage, newCell, pX, &szNew); + if( rc ) goto end_insert; } - if( rc ) goto end_insert; assert( szNew==pPage->xCellSize(pPage, newCell) ); - assert( szNew <= MX_CELL_SIZE(pBt) ); + assert( szNew <= MX_CELL_SIZE(p->pBt) ); idx = pCur->ix; + pCur->info.nSize = 0; if( loc==0 ){ CellInfo info; assert( idx>=0 ); @@ -76376,7 +79985,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( testcase( pCur->curFlags & BTCF_ValidOvfl ); invalidateOverflowCache(pCur); if( info.nSize==szNew && info.nLocal==info.nPayload - && (!ISAUTOVACUUM || szNewminLocal) + && (!ISAUTOVACUUM(p->pBt) || szNewminLocal) ){ /* Overwrite the old cell with the new if they are the same size. ** We could also try to do this if the old cell is smaller, then add @@ -76402,11 +80011,11 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( }else if( loc<0 && pPage->nCell>0 ){ assert( pPage->leaf ); idx = ++pCur->ix; - pCur->curFlags &= ~BTCF_ValidNKey; + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); }else{ assert( pPage->leaf ); } - insertCell(pPage, idx, newCell, szNew, 0, 0, &rc); + rc = insertCellFast(pPage, idx, newCell, szNew); assert( pPage->nOverflow==0 || rc==SQLITE_OK ); assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 ); @@ -76430,10 +80039,9 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** larger than the largest existing key, it is possible to insert the ** row without seeking the cursor. This can be a big performance boost. */ - pCur->info.nSize = 0; if( pPage->nOverflow ){ assert( rc==SQLITE_OK ); - pCur->curFlags &= ~(BTCF_ValidNKey); + pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl); rc = balance(pCur); /* Must make sure nOverflow is reset to zero even if the balance() @@ -76479,7 +80087,6 @@ SQLITE_PRIVATE int sqlite3BtreeInsert( ** SQLITE_OK is returned if successful, or an SQLite error code otherwise. 
*/ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){ - int rc = SQLITE_OK; BtShared *pBt = pDest->pBt; u8 *aOut = pBt->pTmpSpace; /* Pointer to next output buffer */ const u8 *aIn; /* Pointer to next input buffer */ @@ -76502,7 +80109,9 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 if( nIn==nRem && nInpPage->maxLocal ){ memcpy(aOut, aIn, nIn); pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace); + return SQLITE_OK; }else{ + int rc = SQLITE_OK; Pager *pSrcPager = pSrc->pBt->pPager; u8 *pPgnoOut = 0; Pgno ovflIn = 0; @@ -76554,7 +80163,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 MemPage *pNew = 0; rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0); put4byte(pPgnoOut, pgnoNew); - if( ISAUTOVACUUM && pPageOut ){ + if( ISAUTOVACUUM(pBt) && pPageOut ){ ptrmapPut(pBt, pgnoNew, PTRMAP_OVERFLOW2, pPageOut->pgno, &rc); } releasePage(pPageOut); @@ -76570,9 +80179,8 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 releasePage(pPageOut); sqlite3PagerUnref(pPageIn); + return rc; } - - return rc; } /* @@ -76631,6 +80239,9 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ if( pPage->nFree<0 && btreeComputeFreeSpace(pPage) ){ return SQLITE_CORRUPT_BKPT; } + if( pCell<&pPage->aCellIdx[pPage->nCell] ){ + return SQLITE_CORRUPT_BKPT; + } /* If the BTREE_SAVEPOSITION bit is on, then the cursor position must ** be preserved following this delete operation. If the current delete @@ -76727,7 +80338,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){ assert( pTmp!=0 ); rc = sqlite3PagerWrite(pLeaf->pDbPage); if( rc==SQLITE_OK ){ - insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc); + rc = insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n); } dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc); if( rc ) return rc; @@ -76807,7 +80418,7 @@ static int btreeCreateTable(Btree *p, Pgno *piTable, int createTabFlags){ MemPage *pRoot; Pgno pgnoRoot; int rc; - int ptfFlags; /* Page-type flage for the root page of new table */ + int ptfFlags; /* Page-type flags for the root page of new table */ assert( sqlite3BtreeHoldsMutex(p) ); assert( pBt->inTransaction==TRANS_WRITE ); @@ -76976,7 +80587,7 @@ static int clearDatabasePage( if( pgno>btreePagecount(pBt) ){ return SQLITE_CORRUPT_BKPT; } - rc = getAndInitPage(pBt, pgno, &pPage, 0, 0); + rc = getAndInitPage(pBt, pgno, &pPage, 0); if( rc ) return rc; if( (pBt->openFlags & BTREE_SINGLE)==0 && sqlite3PagerPageRefcount(pPage->pDbPage) != (1 + (pgno==1)) @@ -77326,6 +80937,41 @@ SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){ } #ifndef SQLITE_OMIT_INTEGRITY_CHECK +/* +** Record an OOM error during integrity_check +*/ +static void checkOom(IntegrityCk *pCheck){ + pCheck->rc = SQLITE_NOMEM; + pCheck->mxErr = 0; /* Causes integrity_check processing to stop */ + if( pCheck->nErr==0 ) pCheck->nErr++; +} + +/* +** Invoke the progress handler, if appropriate. Also check for an +** interrupt. 
+*/ +static void checkProgress(IntegrityCk *pCheck){ + sqlite3 *db = pCheck->db; + if( AtomicLoad(&db->u1.isInterrupted) ){ + pCheck->rc = SQLITE_INTERRUPT; + pCheck->nErr++; + pCheck->mxErr = 0; + } +#ifndef SQLITE_OMIT_PROGRESS_CALLBACK + if( db->xProgress ){ + assert( db->nProgressOps>0 ); + pCheck->nStep++; + if( (pCheck->nStep % db->nProgressOps)==0 + && db->xProgress(db->pProgressArg) + ){ + pCheck->rc = SQLITE_INTERRUPT; + pCheck->nErr++; + pCheck->mxErr = 0; + } + } +#endif +} + /* ** Append a message to the error message string. */ @@ -77335,6 +80981,7 @@ static void checkAppendMsg( ... ){ va_list ap; + checkProgress(pCheck); if( !pCheck->mxErr ) return; pCheck->mxErr--; pCheck->nErr++; @@ -77343,12 +80990,13 @@ static void checkAppendMsg( sqlite3_str_append(&pCheck->errMsg, "\n", 1); } if( pCheck->zPfx ){ - sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, pCheck->v1, pCheck->v2); + sqlite3_str_appendf(&pCheck->errMsg, pCheck->zPfx, + pCheck->v0, pCheck->v1, pCheck->v2); } sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap); va_end(ap); if( pCheck->errMsg.accError==SQLITE_NOMEM ){ - pCheck->bOomFault = 1; + checkOom(pCheck); } } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ @@ -77360,7 +81008,8 @@ static void checkAppendMsg( ** corresponds to page iPg is already set. */ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); + assert( pCheck->aPgRef!=0 ); + assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 ); return (pCheck->aPgRef[iPg/8] & (1 << (iPg & 0x07))); } @@ -77368,7 +81017,8 @@ static int getPageReferenced(IntegrityCk *pCheck, Pgno iPg){ ** Set the bit in the IntegrityCk.aPgRef[] array that corresponds to page iPg. */ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ - assert( iPg<=pCheck->nPage && sizeof(pCheck->aPgRef[0])==1 ); + assert( pCheck->aPgRef!=0 ); + assert( iPg<=pCheck->nCkPage && sizeof(pCheck->aPgRef[0])==1 ); pCheck->aPgRef[iPg/8] |= (1 << (iPg & 0x07)); } @@ -77382,15 +81032,14 @@ static void setPageReferenced(IntegrityCk *pCheck, Pgno iPg){ ** Also check that the page number is in bounds. 
*/ static int checkRef(IntegrityCk *pCheck, Pgno iPage){ - if( iPage>pCheck->nPage || iPage==0 ){ - checkAppendMsg(pCheck, "invalid page number %d", iPage); + if( iPage>pCheck->nCkPage || iPage==0 ){ + checkAppendMsg(pCheck, "invalid page number %u", iPage); return 1; } if( getPageReferenced(pCheck, iPage) ){ - checkAppendMsg(pCheck, "2nd reference to page %d", iPage); + checkAppendMsg(pCheck, "2nd reference to page %u", iPage); return 1; } - if( AtomicLoad(&pCheck->db->u1.isInterrupted) ) return 1; setPageReferenced(pCheck, iPage); return 0; } @@ -77413,14 +81062,14 @@ static void checkPtrmap( rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent); if( rc!=SQLITE_OK ){ - if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->bOomFault = 1; - checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild); + if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) checkOom(pCheck); + checkAppendMsg(pCheck, "Failed to read ptrmap key=%u", iChild); return; } if( ePtrmapType!=eType || iPtrmapParent!=iParent ){ checkAppendMsg(pCheck, - "Bad ptr map entry key=%d expected=(%d,%d) got=(%d,%d)", + "Bad ptr map entry key=%u expected=(%u,%u) got=(%u,%u)", iChild, eType, iParent, ePtrmapType, iPtrmapParent); } } @@ -77445,7 +81094,7 @@ static void checkList( if( checkRef(pCheck, iPage) ) break; N--; if( sqlite3PagerGet(pCheck->pPager, (Pgno)iPage, &pOvflPage, 0) ){ - checkAppendMsg(pCheck, "failed to get page %d", iPage); + checkAppendMsg(pCheck, "failed to get page %u", iPage); break; } pOvflData = (unsigned char *)sqlite3PagerGetData(pOvflPage); @@ -77458,7 +81107,7 @@ static void checkList( #endif if( n>pCheck->pBt->usableSize/4-2 ){ checkAppendMsg(pCheck, - "freelist leaf count too big on page %d", iPage); + "freelist leaf count too big on page %u", iPage); N--; }else{ for(i=0; i<(int)n; i++){ @@ -77490,7 +81139,7 @@ static void checkList( } if( N && nErrAtStart==pCheck->nErr ){ checkAppendMsg(pCheck, - "%s is %d but should be %d", + "%s is %u but should be %u", isFreeList ? "size" : "overflow list length", expected-N, expected); } @@ -77520,7 +81169,9 @@ static void checkList( ** lower 16 bits are the index of the last byte of that range. */ static void btreeHeapInsert(u32 *aHeap, u32 x){ - u32 j, i = ++aHeap[0]; + u32 j, i; + assert( aHeap!=0 ); + i = ++aHeap[0]; aHeap[i] = x; while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){ x = aHeap[j]; @@ -77597,15 +81248,18 @@ static int checkTreePage( /* Check that the page exists */ + checkProgress(pCheck); + if( pCheck->mxErr==0 ) goto end_of_check; pBt = pCheck->pBt; usableSize = pBt->usableSize; if( iPage==0 ) return 0; if( checkRef(pCheck, iPage) ) return 0; - pCheck->zPfx = "Page %u: "; + pCheck->zPfx = "Tree %u page %u: "; pCheck->v1 = iPage; if( (rc = btreeGetPage(pBt, iPage, &pPage, 0))!=0 ){ checkAppendMsg(pCheck, "unable to get the page. 
error code=%d", rc); + if( rc==SQLITE_IOERR_NOMEM ) pCheck->rc = SQLITE_NOMEM; goto end_of_check; } @@ -77628,7 +81282,7 @@ static int checkTreePage( hdr = pPage->hdrOffset; /* Set up for cell analysis */ - pCheck->zPfx = "On tree page %u cell %d: "; + pCheck->zPfx = "Tree %u page %u cell %u: "; contentOffset = get2byteNotZero(&data[hdr+5]); assert( contentOffset<=usableSize ); /* Enforced by btreeInitPage() */ @@ -77648,7 +81302,7 @@ static int checkTreePage( pgno = get4byte(&data[hdr+8]); #ifndef SQLITE_OMIT_AUTOVACUUM if( pBt->autoVacuum ){ - pCheck->zPfx = "On page %u at right child: "; + pCheck->zPfx = "Tree %u page %u right child: "; checkPtrmap(pCheck, pgno, PTRMAP_BTREE, iPage); } #endif @@ -77672,7 +81326,7 @@ static int checkTreePage( pc = get2byteAligned(pCellIdx); pCellIdx -= 2; if( pcusableSize-4 ){ - checkAppendMsg(pCheck, "Offset %d out of range %d..%d", + checkAppendMsg(pCheck, "Offset %u out of range %u..%u", pc, contentOffset, usableSize-4); doCoverageCheck = 0; continue; @@ -77804,7 +81458,7 @@ static int checkTreePage( */ if( heap[0]==0 && nFrag!=data[hdr+7] ){ checkAppendMsg(pCheck, - "Fragmentation of %d bytes reported as %d on page %u", + "Fragmentation of %u bytes reported as %u on page %u", nFrag, data[hdr+7], iPage); } } @@ -77842,13 +81496,14 @@ static int checkTreePage( ** the unverified btrees. Except, if aRoot[1] is 1, then the freelist ** checks are still performed. */ -SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( +SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck( sqlite3 *db, /* Database connection that is running the check */ Btree *p, /* The btree to be checked */ Pgno *aRoot, /* An array of root pages numbers for individual trees */ int nRoot, /* Number of entries in aRoot[] */ int mxErr, /* Stop reporting errors after this many */ - int *pnErr /* Write number of errors seen to this variable */ + int *pnErr, /* OUT: Write number of errors seen to this variable */ + char **pzOut /* OUT: Write the error message string here */ ){ Pgno i; IntegrityCk sCheck; @@ -77871,42 +81526,36 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE ); VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) ); assert( nRef>=0 ); + memset(&sCheck, 0, sizeof(sCheck)); sCheck.db = db; sCheck.pBt = pBt; sCheck.pPager = pBt->pPager; - sCheck.nPage = btreePagecount(sCheck.pBt); + sCheck.nCkPage = btreePagecount(sCheck.pBt); sCheck.mxErr = mxErr; - sCheck.nErr = 0; - sCheck.bOomFault = 0; - sCheck.zPfx = 0; - sCheck.v1 = 0; - sCheck.v2 = 0; - sCheck.aPgRef = 0; - sCheck.heap = 0; sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH); sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL; - if( sCheck.nPage==0 ){ + if( sCheck.nCkPage==0 ){ goto integrity_ck_cleanup; } - sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1); + sCheck.aPgRef = sqlite3MallocZero((sCheck.nCkPage / 8)+ 1); if( !sCheck.aPgRef ){ - sCheck.bOomFault = 1; + checkOom(&sCheck); goto integrity_ck_cleanup; } sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize ); if( sCheck.heap==0 ){ - sCheck.bOomFault = 1; + checkOom(&sCheck); goto integrity_ck_cleanup; } i = PENDING_BYTE_PAGE(pBt); - if( i<=sCheck.nPage ) setPageReferenced(&sCheck, i); + if( i<=sCheck.nCkPage ) setPageReferenced(&sCheck, i); /* Check the integrity of the freelist */ if( bCkFreelist ){ - sCheck.zPfx = "Main freelist: "; + sCheck.zPfx = "Freelist: "; checkList(&sCheck, 1, get4byte(&pBt->pPage1->aData[32]), get4byte(&pBt->pPage1->aData[36])); sCheck.zPfx = 0; @@ 
-77923,7 +81572,7 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( mxInHdr = get4byte(&pBt->pPage1->aData[52]); if( mx!=mxInHdr ){ checkAppendMsg(&sCheck, - "max rootpage (%d) disagrees with header (%d)", + "max rootpage (%u) disagrees with header (%u)", mx, mxInHdr ); } @@ -77944,6 +81593,7 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( checkPtrmap(&sCheck, aRoot[i], PTRMAP_ROOTPAGE, 0); } #endif + sCheck.v0 = aRoot[i]; checkTreePage(&sCheck, aRoot[i], ¬Used, LARGEST_INT64); } pBt->db->flags = savedDbFlags; @@ -77951,10 +81601,10 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( /* Make sure every page in the file is referenced */ if( !bPartial ){ - for(i=1; i<=sCheck.nPage && sCheck.mxErr; i++){ + for(i=1; i<=sCheck.nCkPage && sCheck.mxErr; i++){ #ifdef SQLITE_OMIT_AUTOVACUUM if( getPageReferenced(&sCheck, i)==0 ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } #else /* If the database supports auto-vacuum, make sure no tables contain @@ -77962,11 +81612,11 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( */ if( getPageReferenced(&sCheck, i)==0 && (PTRMAP_PAGENO(pBt, i)!=i || !pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Page %d is never used", i); + checkAppendMsg(&sCheck, "Page %u: never used", i); } if( getPageReferenced(&sCheck, i)!=0 && (PTRMAP_PAGENO(pBt, i)==i && pBt->autoVacuum) ){ - checkAppendMsg(&sCheck, "Pointer map page %d is referenced", i); + checkAppendMsg(&sCheck, "Page %u: pointer map referenced", i); } #endif } @@ -77977,16 +81627,17 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck( integrity_ck_cleanup: sqlite3PageFree(sCheck.heap); sqlite3_free(sCheck.aPgRef); - if( sCheck.bOomFault ){ + *pnErr = sCheck.nErr; + if( sCheck.nErr==0 ){ sqlite3_str_reset(&sCheck.errMsg); - sCheck.nErr++; + *pzOut = 0; + }else{ + *pzOut = sqlite3StrAccumFinish(&sCheck.errMsg); } - *pnErr = sCheck.nErr; - if( sCheck.nErr==0 ) sqlite3_str_reset(&sCheck.errMsg); /* Make sure this analysis did not leave any unref() pages. */ assert( nRef==sqlite3PagerRefcount(pBt->pPager) ); sqlite3BtreeLeave(p); - return sqlite3StrAccumFinish(&sCheck.errMsg); + return sCheck.rc; } #endif /* SQLITE_OMIT_INTEGRITY_CHECK */ @@ -78251,6 +81902,17 @@ SQLITE_PRIVATE int sqlite3BtreeIsReadonly(Btree *p){ */ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void){ return ROUND8(sizeof(MemPage)); } +/* +** If no transaction is active and the database is not a temp-db, clear +** the in-memory pager cache. +*/ +SQLITE_PRIVATE void sqlite3BtreeClearCache(Btree *p){ + BtShared *pBt = p->pBt; + if( pBt->inTransaction==TRANS_NONE ){ + sqlite3PagerClearCache(pBt->pPager); + } +} + #if !defined(SQLITE_OMIT_SHARED_CACHE) /* ** Return true if the Btree passed as the only argument is sharable. @@ -78547,13 +82209,7 @@ static int backupOnePage( assert( !isFatalError(p->rc) ); assert( iSrcPg!=PENDING_BYTE_PAGE(p->pSrc->pBt) ); assert( zSrcData ); - - /* Catch the case where the destination is an in-memory database and the - ** page sizes of the source and destination differ. 
- */ - if( nSrcPgsz!=nDestPgsz && sqlite3PagerIsMemdb(pDestPager) ){ - rc = SQLITE_READONLY; - } + assert( nSrcPgsz==nDestPgsz || sqlite3PagerIsMemdb(pDestPager)==0 ); /* BEGIN SQLCIPHER */ #ifdef SQLITE_HAS_CODEC @@ -78708,7 +82364,10 @@ SQLITE_API int sqlite3_backup_step(sqlite3_backup *p, int nPage){ pgszSrc = sqlite3BtreeGetPageSize(p->pSrc); pgszDest = sqlite3BtreeGetPageSize(p->pDest); destMode = sqlite3PagerGetJournalMode(sqlite3BtreePager(p->pDest)); - if( SQLITE_OK==rc && destMode==PAGER_JOURNALMODE_WAL && pgszSrc!=pgszDest ){ + if( SQLITE_OK==rc + && (destMode==PAGER_JOURNALMODE_WAL || sqlite3PagerIsMemdb(pDestPager)) + && pgszSrc!=pgszDest + ){ rc = SQLITE_READONLY; } @@ -79220,9 +82879,9 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){ i64 x; assert( (p->flags&MEM_Int)*2==sizeof(x) ); memcpy(&x, (char*)&p->u, (p->flags&MEM_Int)*2); - sqlite3Int64ToText(x, zBuf); + p->n = sqlite3Int64ToText(x, zBuf); #else - sqlite3Int64ToText(p->u.i, zBuf); + p->n = sqlite3Int64ToText(p->u.i, zBuf); #endif }else{ sqlite3StrAccumInit(&acc, 0, zBuf, sz, 0); @@ -79230,6 +82889,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){ (p->flags & MEM_IntReal)!=0 ? (double)p->u.i : p->u.r); assert( acc.zText==zBuf && acc.mxAlloc<=0 ); zBuf[acc.nChar] = 0; /* Fast version of sqlite3StrAccumFinish(&acc) */ + p->n = acc.nChar; } } @@ -79257,10 +82917,12 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){ ** This routine is for use inside of assert() statements only. */ SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){ + Mem tmp; char zBuf[100]; char *z; int i, j, incr; if( (p->flags & MEM_Str)==0 ) return 1; + if( p->db && p->db->mallocFailed ) return 1; if( p->flags & MEM_Term ){ /* Insure that the string is properly zero-terminated. Pay particular ** attention to the case where p->n is odd */ @@ -79273,7 +82935,8 @@ SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){ assert( p->enc==SQLITE_UTF8 || p->z[((p->n+1)&~1)+1]==0 ); } if( (p->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 ) return 1; - vdbeMemRenderNum(sizeof(zBuf), zBuf, p); + memcpy(&tmp, p, sizeof(tmp)); + vdbeMemRenderNum(sizeof(zBuf), zBuf, &tmp); z = p->z; i = j = 0; incr = 1; @@ -79416,6 +83079,40 @@ SQLITE_PRIVATE int sqlite3VdbeMemClearAndResize(Mem *pMem, int szNew){ return SQLITE_OK; } +/* +** If pMem is already a string, detect if it is a zero-terminated +** string, or make it into one if possible, and mark it as such. +** +** This is an optimization. Correct operation continues even if +** this routine is a no-op. +*/ +SQLITE_PRIVATE void sqlite3VdbeMemZeroTerminateIfAble(Mem *pMem){ + if( (pMem->flags & (MEM_Str|MEM_Term|MEM_Ephem|MEM_Static))!=MEM_Str ){ + /* pMem must be a string, and it cannot be an ephemeral or static string */ + return; + } + if( pMem->enc!=SQLITE_UTF8 ) return; + if( NEVER(pMem->z==0) ) return; + if( pMem->flags & MEM_Dyn ){ + if( pMem->xDel==sqlite3_free + && sqlite3_msize(pMem->z) >= (u64)(pMem->n+1) + ){ + pMem->z[pMem->n] = 0; + pMem->flags |= MEM_Term; + return; + } + if( pMem->xDel==sqlite3RCStrUnref ){ + /* Blindly assume that all RCStr objects are zero-terminated */ + pMem->flags |= MEM_Term; + return; + } + }else if( pMem->szMalloc >= pMem->n+1 ){ + pMem->z[pMem->n] = 0; + pMem->flags |= MEM_Term; + return; + } +} + /* ** It is already known that pMem contains an unterminated string. ** Add the zero terminator. 
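sqlite3VdbeMemZeroTerminateIfAble(), added in the hunk above, terminates a string in place only when it can prove the buffer has a spare byte, and otherwise does nothing, leaving correctness to callers that copy or reallocate on demand. Here is a minimal sketch of that opportunistic pattern; Str and str_term_if_able are illustrative names, and the sketch assumes the capacity is tracked explicitly, whereas the real routine recovers it for sqlite3_free()-owned buffers via sqlite3_msize().

/* Sketch: zero-terminate only if the buffer provably has room. */
typedef struct Str {
  char *z;       /* content; not necessarily NUL-terminated */
  int n;         /* bytes of content */
  int szAlloc;   /* bytes allocated for z (assumed tracked by caller) */
  int isTerm;    /* set once z[n]==0 is guaranteed */
} Str;

static void str_term_if_able(Str *p){
  if( p->isTerm ) return;
  if( p->szAlloc >= p->n+1 ){
    p->z[p->n] = 0;        /* a spare byte exists: write the terminator */
    p->isTerm = 1;
  }
  /* else do nothing: this is purely an optimization, and callers that
  ** need termination must still copy or reallocate as before */
}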
@@ -79542,7 +83239,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){ vdbeMemRenderNum(nByte, pMem->z, pMem); assert( pMem->z!=0 ); - pMem->n = sqlite3Strlen30NN(pMem->z); + assert( pMem->n==(int)sqlite3Strlen30NN(pMem->z) ); pMem->enc = SQLITE_UTF8; pMem->flags |= MEM_Str|MEM_Term; if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); @@ -79677,36 +83374,6 @@ SQLITE_PRIVATE void sqlite3VdbeMemReleaseMalloc(Mem *p){ if( p->szMalloc ) vdbeMemClear(p); } -/* -** Convert a 64-bit IEEE double into a 64-bit signed integer. -** If the double is out of range of a 64-bit signed integer then -** return the closest available 64-bit signed integer. -*/ -static SQLITE_NOINLINE i64 doubleToInt64(double r){ -#ifdef SQLITE_OMIT_FLOATING_POINT - /* When floating-point is omitted, double and int64 are the same thing */ - return r; -#else - /* - ** Many compilers we encounter do not define constants for the - ** minimum and maximum 64-bit integers, or they define them - ** inconsistently. And many do not understand the "LL" notation. - ** So we define our own static constants here using nothing - ** larger than a 32-bit integer constant. - */ - static const i64 maxInt = LARGEST_INT64; - static const i64 minInt = SMALLEST_INT64; - - if( r<=(double)minInt ){ - return minInt; - }else if( r>=(double)maxInt ){ - return maxInt; - }else{ - return (i64)r; - } -#endif -} - /* ** Return some kind of integer value which is the best we can do ** at representing the value that *pMem describes as an integer. @@ -79733,7 +83400,7 @@ SQLITE_PRIVATE i64 sqlite3VdbeIntValue(const Mem *pMem){ testcase( flags & MEM_IntReal ); return pMem->u.i; }else if( flags & MEM_Real ){ - return doubleToInt64(pMem->u.r); + return sqlite3RealToI64(pMem->u.r); }else if( (flags & (MEM_Str|MEM_Blob))!=0 && pMem->z!=0 ){ return memIntValue(pMem); }else{ @@ -79782,32 +83449,35 @@ SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){ } /* -** The MEM structure is already a MEM_Real. Try to also make it a -** MEM_Int if we can. +** The MEM structure is already a MEM_Real or MEM_IntReal. Try to +** make it a MEM_Int if we can. */ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){ - i64 ix; assert( pMem!=0 ); - assert( pMem->flags & MEM_Real ); + assert( pMem->flags & (MEM_Real|MEM_IntReal) ); assert( !sqlite3VdbeMemIsRowSet(pMem) ); assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); assert( EIGHT_BYTE_ALIGNMENT(pMem) ); - ix = doubleToInt64(pMem->u.r); - - /* Only mark the value as an integer if - ** - ** (1) the round-trip conversion real->int->real is a no-op, and - ** (2) The integer is neither the largest nor the smallest - ** possible integer (ticket #3922) - ** - ** The second and third terms in the following conditional enforces - ** the second condition under the assumption that addition overflow causes - ** values to wrap around. - */ - if( pMem->u.r==ix && ix>SMALLEST_INT64 && ixu.i = ix; + if( pMem->flags & MEM_IntReal ){ MemSetTypeFlag(pMem, MEM_Int); + }else{ + i64 ix = sqlite3RealToI64(pMem->u.r); + + /* Only mark the value as an integer if + ** + ** (1) the round-trip conversion real->int->real is a no-op, and + ** (2) The integer is neither the largest nor the smallest + ** possible integer (ticket #3922) + ** + ** The second and third terms in the following conditional enforces + ** the second condition under the assumption that addition overflow causes + ** values to wrap around. 
+ */ + if( pMem->u.r==ix && ix>SMALLEST_INT64 && ixu.i = ix; + MemSetTypeFlag(pMem, MEM_Int); + } } } @@ -79855,6 +83525,16 @@ SQLITE_PRIVATE int sqlite3RealSameAsInt(double r1, sqlite3_int64 i){ && i >= -2251799813685248LL && i < 2251799813685248LL); } +/* Convert a floating point value to its closest integer. Do so in +** a way that avoids 'outside the range of representable values' warnings +** from UBSAN. +*/ +SQLITE_PRIVATE i64 sqlite3RealToI64(double r){ + if( r<-9223372036854774784.0 ) return SMALLEST_INT64; + if( r>+9223372036854774784.0 ) return LARGEST_INT64; + return (i64)r; +} + /* ** Convert pMem so that it has type MEM_Real or MEM_Int. ** Invalidate any prior representations. @@ -79876,7 +83556,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem *pMem){ assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) ); rc = sqlite3AtoF(pMem->z, &pMem->u.r, pMem->n, pMem->enc); if( ((rc==0 || rc==1) && sqlite3Atoi64(pMem->z, &ix, pMem->n, pMem->enc)<=1) - || sqlite3RealSameAsInt(pMem->u.r, (ix = (i64)pMem->u.r)) + || sqlite3RealSameAsInt(pMem->u.r, (ix = sqlite3RealToI64(pMem->u.r))) ){ pMem->u.i = ix; MemSetTypeFlag(pMem, MEM_Int); @@ -79922,13 +83602,17 @@ SQLITE_PRIVATE int sqlite3VdbeMemCast(Mem *pMem, u8 aff, u8 encoding){ break; } default: { + int rc; assert( aff==SQLITE_AFF_TEXT ); assert( MEM_Str==(MEM_Blob>>3) ); pMem->flags |= (pMem->flags&MEM_Blob)>>3; sqlite3ValueApplyAffinity(pMem, SQLITE_AFF_TEXT, encoding); assert( pMem->flags & MEM_Str || pMem->db->mallocFailed ); pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal|MEM_Blob|MEM_Zero); - return sqlite3VdbeChangeEncoding(pMem, encoding); + if( encoding!=SQLITE_UTF8 ) pMem->n &= ~1; + rc = sqlite3VdbeChangeEncoding(pMem, encoding); + if( rc ) return rc; + sqlite3VdbeMemZeroTerminateIfAble(pMem); } } return SQLITE_OK; @@ -80452,6 +84136,24 @@ SQLITE_PRIVATE const void *sqlite3ValueText(sqlite3_value* pVal, u8 enc){ return valueToText(pVal, enc); } +/* Return true if sqlit3_value object pVal is a string or blob value +** that uses the destructor specified in the second argument. +** +** TODO: Maybe someday promote this interface into a published API so +** that third-party extensions can get access to it? +*/ +SQLITE_PRIVATE int sqlite3ValueIsOfClass(const sqlite3_value *pVal, void(*xFree)(void*)){ + if( ALWAYS(pVal!=0) + && ALWAYS((pVal->flags & (MEM_Str|MEM_Blob))!=0) + && (pVal->flags & MEM_Dyn)!=0 + && pVal->xDel==xFree + ){ + return 1; + }else{ + return 0; + } +} + /* ** Create a new sqlite3_value object. 
*/ @@ -80519,6 +84221,7 @@ static sqlite3_value *valueNew(sqlite3 *db, struct ValueNewStat4Ctx *p){ } pRec->nField = p->iVal+1; + sqlite3VdbeMemSetNull(&pRec->aMem[p->iVal]); return &pRec->aMem[p->iVal]; } #else @@ -80572,9 +84275,12 @@ static int valueFromFunction( if( pList ) nVal = pList->nExpr; assert( !ExprHasProperty(p, EP_IntValue) ); pFunc = sqlite3FindFunction(db, p->u.zToken, nVal, enc, 0); +#ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION + if( pFunc==0 ) return SQLITE_OK; +#endif assert( pFunc ); if( (pFunc->funcFlags & (SQLITE_FUNC_CONSTANT|SQLITE_FUNC_SLOCHNG))==0 - || (pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL) + || (pFunc->funcFlags & (SQLITE_FUNC_NEEDCOLL|SQLITE_FUNC_RUNONLY))!=0 ){ return SQLITE_OK; } @@ -80597,8 +84303,6 @@ static int valueFromFunction( goto value_from_function_out; } - testcase( pCtx->pParse->rc==SQLITE_ERROR ); - testcase( pCtx->pParse->rc==SQLITE_OK ); memset(&ctx, 0, sizeof(ctx)); ctx.pOut = pVal; ctx.pFunc = pFunc; @@ -80611,16 +84315,16 @@ static int valueFromFunction( sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8); assert( rc==SQLITE_OK ); rc = sqlite3VdbeChangeEncoding(pVal, enc); - if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){ + if( NEVER(rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal)) ){ rc = SQLITE_TOOBIG; pCtx->pParse->nErr++; } } - pCtx->pParse->rc = rc; value_from_function_out: if( rc!=SQLITE_OK ){ pVal = 0; + pCtx->pParse->rc = rc; } if( apVal ){ for(i=0; ipLeft, enc, aff, ppVal, pCtx); testcase( rc!=SQLITE_OK ); if( *ppVal ){ +#ifdef SQLITE_ENABLE_STAT4 + rc = ExpandBlob(*ppVal); +#else + /* zero-blobs only come from functions, not literal values. And + ** functions are only processed under STAT4 */ + assert( (ppVal[0][0].flags & MEM_Zero)==0 ); +#endif sqlite3VdbeMemCast(*ppVal, aff, enc); sqlite3ValueApplyAffinity(*ppVal, affinity, enc); } @@ -80770,6 +84481,7 @@ static int valueFromExpr( if( pVal ){ pVal->flags = MEM_Int; pVal->u.i = pExpr->u.zToken[4]==0; + sqlite3ValueApplyAffinity(pVal, affinity, enc); } } @@ -81063,6 +84775,9 @@ SQLITE_PRIVATE int sqlite3ValueBytes(sqlite3_value *pVal, u8 enc){ if( (p->flags & MEM_Str)!=0 && pVal->enc==enc ){ return p->n; } + if( (p->flags & MEM_Str)!=0 && enc!=SQLITE_UTF8 && pVal->enc!=SQLITE_UTF8 ){ + return p->n; + } if( (p->flags & MEM_Blob)!=0 ){ if( p->flags & MEM_Zero ){ return p->n + p->u.nZero; @@ -81108,10 +84823,10 @@ SQLITE_PRIVATE Vdbe *sqlite3VdbeCreate(Parse *pParse){ memset(&p->aOp, 0, sizeof(Vdbe)-offsetof(Vdbe,aOp)); p->db = db; if( db->pVdbe ){ - db->pVdbe->pPrev = p; + db->pVdbe->ppVPrev = &p->pVNext; } - p->pNext = db->pVdbe; - p->pPrev = 0; + p->pVNext = db->pVdbe; + p->ppVPrev = &db->pVdbe; db->pVdbe = p; assert( p->eVdbeState==VDBE_INIT_STATE ); p->pParse = pParse; @@ -81193,21 +84908,28 @@ SQLITE_PRIVATE int sqlite3VdbeUsesDoubleQuotedString( #endif /* -** Swap all content between two VDBE structures. +** Swap byte-code between two VDBE structures. +** +** This happens after pB was previously run and returned +** SQLITE_SCHEMA. The statement was then reprepared in pA. +** This routine transfers the new bytecode in pA over to pB +** so that pB can be run again. The old pB byte code is +** moved back to pA so that it will be cleaned up when pA is +** finalized. 
*/ SQLITE_PRIVATE void sqlite3VdbeSwap(Vdbe *pA, Vdbe *pB){ - Vdbe tmp, *pTmp; + Vdbe tmp, *pTmp, **ppTmp; char *zTmp; assert( pA->db==pB->db ); tmp = *pA; *pA = *pB; *pB = tmp; - pTmp = pA->pNext; - pA->pNext = pB->pNext; - pB->pNext = pTmp; - pTmp = pA->pPrev; - pA->pPrev = pB->pPrev; - pB->pPrev = pTmp; + pTmp = pA->pVNext; + pA->pVNext = pB->pVNext; + pB->pVNext = pTmp; + ppTmp = pA->ppVPrev; + pA->ppVPrev = pB->ppVPrev; + pB->ppVPrev = ppTmp; zTmp = pA->zSql; pA->zSql = pB->zSql; pB->zSql = zTmp; @@ -81282,11 +85004,43 @@ static int growOpArray(Vdbe *v, int nOp){ ** sqlite3CantopenError(lineno) */ static void test_addop_breakpoint(int pc, Op *pOp){ - static int n = 0; + static u64 n = 0; + (void)pc; + (void)pOp; n++; + if( n==LARGEST_UINT64 ) abort(); /* so that n is used, preventing a warning */ } #endif +/* +** Slow paths for sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() for the +** unusual case when we need to increase the size of the Vdbe.aOp[] array +** before adding the new opcode. +*/ +static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){ + assert( p->nOpAlloc<=p->nOp ); + if( growOpArray(p, 1) ) return 1; + assert( p->nOpAlloc>p->nOp ); + return sqlite3VdbeAddOp3(p, op, p1, p2, p3); +} +static SQLITE_NOINLINE int addOp4IntSlow( + Vdbe *p, /* Add the opcode to this VM */ + int op, /* The new opcode */ + int p1, /* The P1 operand */ + int p2, /* The P2 operand */ + int p3, /* The P3 operand */ + int p4 /* The P4 operand as an integer */ +){ + int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); + if( p->db->mallocFailed==0 ){ + VdbeOp *pOp = &p->aOp[addr]; + pOp->p4type = P4_INT32; + pOp->p4.i = p4; + } + return addr; +} + + /* ** Add a new instruction to the list of instructions current in the ** VDBE. Return the address of the new instruction. @@ -81297,17 +85051,16 @@ static void test_addop_breakpoint(int pc, Op *pOp){ ** ** op The opcode for this instruction ** -** p1, p2, p3 Operands -** -** Use the sqlite3VdbeResolveLabel() function to fix an address and -** the sqlite3VdbeChangeP4() function to change the value of the P4 -** operand. 
+** p1, p2, p3, p4 Operands */ -static SQLITE_NOINLINE int growOp3(Vdbe *p, int op, int p1, int p2, int p3){ - assert( p->nOpAlloc<=p->nOp ); - if( growOpArray(p, 1) ) return 1; - assert( p->nOpAlloc>p->nOp ); - return sqlite3VdbeAddOp3(p, op, p1, p2, p3); +SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ + return sqlite3VdbeAddOp3(p, op, 0, 0, 0); +} +SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ + return sqlite3VdbeAddOp3(p, op, p1, 0, 0); +} +SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ + return sqlite3VdbeAddOp3(p, op, p1, p2, 0); } SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ int i; @@ -81330,32 +85083,78 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){ pOp->p3 = p3; pOp->p4.p = 0; pOp->p4type = P4_NOTUSED; + + /* Replicate this logic in sqlite3VdbeAddOp4Int() + ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */ #ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS pOp->zComment = 0; #endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + pOp->nExec = 0; + pOp->nCycle = 0; +#endif #ifdef SQLITE_DEBUG if( p->db->flags & SQLITE_VdbeAddopTrace ){ sqlite3VdbePrintOp(0, i, &p->aOp[i]); test_addop_breakpoint(i, &p->aOp[i]); } #endif -#ifdef VDBE_PROFILE - pOp->cycles = 0; - pOp->cnt = 0; -#endif #ifdef SQLITE_VDBE_COVERAGE pOp->iSrcLine = 0; #endif + /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ** Replicate in sqlite3VdbeAddOp4Int() */ + return i; } -SQLITE_PRIVATE int sqlite3VdbeAddOp0(Vdbe *p, int op){ - return sqlite3VdbeAddOp3(p, op, 0, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp1(Vdbe *p, int op, int p1){ - return sqlite3VdbeAddOp3(p, op, p1, 0, 0); -} -SQLITE_PRIVATE int sqlite3VdbeAddOp2(Vdbe *p, int op, int p1, int p2){ - return sqlite3VdbeAddOp3(p, op, p1, p2, 0); +SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( + Vdbe *p, /* Add the opcode to this VM */ + int op, /* The new opcode */ + int p1, /* The P1 operand */ + int p2, /* The P2 operand */ + int p3, /* The P3 operand */ + int p4 /* The P4 operand as an integer */ +){ + int i; + VdbeOp *pOp; + + i = p->nOp; + if( p->nOpAlloc<=i ){ + return addOp4IntSlow(p, op, p1, p2, p3, p4); + } + p->nOp++; + pOp = &p->aOp[i]; + assert( pOp!=0 ); + pOp->opcode = (u8)op; + pOp->p5 = 0; + pOp->p1 = p1; + pOp->p2 = p2; + pOp->p3 = p3; + pOp->p4.i = p4; + pOp->p4type = P4_INT32; + + /* Replicate this logic in sqlite3VdbeAddOp3() + ** vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv */ +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + pOp->zComment = 0; +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + pOp->nExec = 0; + pOp->nCycle = 0; +#endif +#ifdef SQLITE_DEBUG + if( p->db->flags & SQLITE_VdbeAddopTrace ){ + sqlite3VdbePrintOp(0, i, &p->aOp[i]); + test_addop_breakpoint(i, &p->aOp[i]); + } +#endif +#ifdef SQLITE_VDBE_COVERAGE + pOp->iSrcLine = 0; +#endif + /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + ** Replicate in sqlite3VdbeAddOp3() */ + + return i; } /* Generate code for an unconditional jump to instruction iDest @@ -81510,11 +85309,12 @@ SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char *z1, const char *z2){ ** If the bPush flag is true, then make this opcode the parent for ** subsequent Explains until sqlite3VdbeExplainPop() is called. 
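**
** Both sqlite3VdbeAddOp3() and sqlite3VdbeAddOp4Int() above follow the
** same shape: the common append stays on a short fast path, and the rare
** "opcode array is full" case is pushed into a SQLITE_NOINLINE helper
** that grows the array and then simply re-enters the fast path. A
** minimal sketch of the pattern (invented names; the noinline attribute
** is written in GCC/Clang form here):
**
**      #include <stdlib.h>
**      typedef struct { int *a; int n, nAlloc; } IntVec;
**      static int vecAppend(IntVec*, int);
**      static __attribute__((noinline)) int vecAppendSlow(IntVec *p, int v){
**        int nNew = p->nAlloc ? p->nAlloc*2 : 8;
**        int *aNew = realloc(p->a, nNew*sizeof(int));
**        if( aNew==0 ) return -1;
**        p->a = aNew;
**        p->nAlloc = nNew;
**        return vecAppend(p, v);       /* retry on the fast path */
**      }
**      static int vecAppend(IntVec *p, int v){
**        if( p->n>=p->nAlloc ) return vecAppendSlow(p, v);
**        p->a[p->n] = v;
**        return p->n++;                /* index of the new element */
**      }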
*/ -SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){ -#ifndef SQLITE_DEBUG +SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){ + int addr = 0; +#if !defined(SQLITE_DEBUG) /* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined. ** But omit them (for performance) during production builds */ - if( pParse->explain==2 ) + if( pParse->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { char *zMsg; @@ -81526,13 +85326,15 @@ SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt va_end(ap); v = pParse->pVdbe; iThis = v->nOp; - sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0, + addr = sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0, zMsg, P4_DYNAMIC); - sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetOp(v,-1)->p4.z); + sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetLastOp(v)->p4.z); if( bPush){ pParse->addrExplain = iThis; } + sqlite3VdbeScanStatus(v, iThis, -1, -1, 0, 0); } + return addr; } /* @@ -81560,26 +85362,6 @@ SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe *p, int iDb, char *zWhere, sqlite3MayAbort(p->pParse); } -/* -** Add an opcode that includes the p4 value as an integer. -*/ -SQLITE_PRIVATE int sqlite3VdbeAddOp4Int( - Vdbe *p, /* Add the opcode to this VM */ - int op, /* The new opcode */ - int p1, /* The P1 operand */ - int p2, /* The P2 operand */ - int p3, /* The P3 operand */ - int p4 /* The P4 operand as an integer */ -){ - int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3); - if( p->db->mallocFailed==0 ){ - VdbeOp *pOp = &p->aOp[addr]; - pOp->p4type = P4_INT32; - pOp->p4.i = p4; - } - return addr; -} - /* Insert the end of a co-routine */ SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe *v, int regYield){ @@ -81640,6 +85422,9 @@ static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){ int i; for(i=p->nLabelAlloc; iaLabel[i] = -1; #endif + if( nNewSize>=100 && (nNewSize/100)>(p->nLabelAlloc/100) ){ + sqlite3ProgressCheck(p); + } p->nLabelAlloc = nNewSize; p->aLabel[j] = v->nOp; } @@ -81883,11 +85668,13 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ Op *pOp; Parse *pParse = p->pParse; int *aLabel = pParse->aLabel; + + assert( pParse->db->mallocFailed==0 ); /* tag-20230419-1 */ p->readOnly = 1; p->bIsReader = 0; pOp = &p->aOp[p->nOp-1]; - while(1){ - + assert( p->aOp[0].opcode==OP_Init ); + while( 1 /* Loop terminates when it reaches the OP_Init opcode */ ){ /* Only JUMP opcodes and the short list of special opcodes in the switch ** below need to be considered. The mkopcodeh.tcl generator script groups ** all these opcodes together near the front of the opcode list. Skip @@ -81916,6 +85703,10 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ p->bIsReader = 1; break; } + case OP_Init: { + assert( pOp->p2>=0 ); + goto resolve_p2_values_loop_exit; + } #ifndef SQLITE_OMIT_VIRTUALTABLE case OP_VUpdate: { if( pOp->p2>nMaxArgs ) nMaxArgs = pOp->p2; @@ -81938,6 +85729,7 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ ** have non-negative values for P2. */ assert( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_JUMP)!=0 ); assert( ADDR(pOp->p2)<-pParse->nLabel ); + assert( aLabel!=0 ); /* True because of tag-20230419-1 */ pOp->p2 = aLabel[ADDR(pOp->p2)]; } break; @@ -81948,11 +85740,12 @@ static void resolveP2Values(Vdbe *p, int *pMaxFuncArgs){ ** have non-negative values for P2. 
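**
** The label machinery that resolveP2Values() above finishes off works by
** backpatching: forward jump targets are handed out as negative label
** numbers while code is generated, and one pass at the end replaces each
** negative P2 with the address that was recorded when the label was
** resolved. Reduced to a standalone sketch (invented names; SQLite's
** ADDR() macro plays the role of SLOT() here):
**
**      typedef struct { int opcode, p2; } MiniOp;
**      typedef struct { MiniOp aOp[64]; int nOp;
**                       int aLabel[16]; int nLabel; } Gen;
**      #define SLOT(lbl) (-1 - (lbl))
**      static int makeLabel(Gen *g){ return -1 - g->nLabel++; }
**      static void resolveLabel(Gen *g, int lbl){ g->aLabel[SLOT(lbl)] = g->nOp; }
**      static void backpatch(Gen *g){        /* run once, after codegen */
**        int i;
**        for(i=0; i<g->nOp; i++){
**          if( g->aOp[i].p2<0 ) g->aOp[i].p2 = g->aLabel[SLOT(g->aOp[i].p2)];
**        }
**      }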
*/ assert( (sqlite3OpcodeProperty[pOp->opcode]&OPFLG_JUMP)==0 || pOp->p2>=0); } - if( pOp==p->aOp ) break; + assert( pOp>p->aOp ); pOp--; } +resolve_p2_values_loop_exit: if( aLabel ){ - sqlite3DbFreeNN(p->db, pParse->aLabel); + sqlite3DbNNFreeNN(p->db, pParse->aLabel); pParse->aLabel = 0; } pParse->nLabel = 0; @@ -82003,6 +85796,10 @@ SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn( int iDest = pOp->p2; /* Jump destination */ if( iDest==0 ) continue; if( pOp->opcode==OP_Gosub ) continue; + if( pOp->p3==20230325 && pOp->opcode==OP_NotNull ){ + /* This is a deliberately taken illegal branch. tag-20230325-2 */ + continue; + } if( iDest<0 ){ int j = ADDR(iDest); assert( j>=0 ); @@ -82180,20 +85977,83 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( LogEst nEst, /* Estimated number of output rows */ const char *zName /* Name of table or index being scanned */ ){ - sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); - ScanStatus *aNew; - aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); - if( aNew ){ - ScanStatus *pNew = &aNew[p->nScan++]; - pNew->addrExplain = addrExplain; - pNew->addrLoop = addrLoop; - pNew->addrVisit = addrVisit; - pNew->nEst = nEst; - pNew->zName = sqlite3DbStrDup(p->db, zName); - p->aScan = aNew; + if( IS_STMT_SCANSTATUS(p->db) ){ + sqlite3_int64 nByte = (p->nScan+1) * sizeof(ScanStatus); + ScanStatus *aNew; + aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte); + if( aNew ){ + ScanStatus *pNew = &aNew[p->nScan++]; + memset(pNew, 0, sizeof(ScanStatus)); + pNew->addrExplain = addrExplain; + pNew->addrLoop = addrLoop; + pNew->addrVisit = addrVisit; + pNew->nEst = nEst; + pNew->zName = sqlite3DbStrDup(p->db, zName); + p->aScan = aNew; + } } } -#endif + +/* +** Add the range of instructions from addrStart to addrEnd (inclusive) to +** the set of those corresponding to the sqlite3_stmt_scanstatus() counters +** associated with the OP_Explain instruction at addrExplain. The +** sum of the sqlite3Hwtime() values for each of these instructions +** will be returned for SQLITE_SCANSTAT_NCYCLE requests. +*/ +SQLITE_PRIVATE void sqlite3VdbeScanStatusRange( + Vdbe *p, + int addrExplain, + int addrStart, + int addrEnd +){ + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + if( addrEnd<0 ) addrEnd = sqlite3VdbeCurrentAddr(p)-1; + for(ii=0; iiaAddrRange); ii+=2){ + if( pScan->aAddrRange[ii]==0 ){ + pScan->aAddrRange[ii] = addrStart; + pScan->aAddrRange[ii+1] = addrEnd; + break; + } + } + } + } +} + +/* +** Set the addresses for the SQLITE_SCANSTAT_NLOOP and SQLITE_SCANSTAT_NROW +** counters for the query element associated with the OP_Explain at +** addrExplain. +*/ +SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters( + Vdbe *p, + int addrExplain, + int addrLoop, + int addrVisit +){ + if( IS_STMT_SCANSTATUS(p->db) ){ + ScanStatus *pScan = 0; + int ii; + for(ii=p->nScan-1; ii>=0; ii--){ + pScan = &p->aScan[ii]; + if( pScan->addrExplain==addrExplain ) break; + pScan = 0; + } + if( pScan ){ + if( addrLoop>0 ) pScan->addrLoop = addrLoop; + if( addrVisit>0 ) pScan->addrVisit = addrVisit; + } + } +} +#endif /* defined(SQLITE_ENABLE_STMT_SCANSTATUS) */ /* @@ -82201,15 +86061,19 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus( ** for a specific instruction. 
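**
** sqlite3VdbeScanStatusRange() above records up to a fixed number of
** (start,end) opcode-address pairs per scan, and SQLITE_SCANSTAT_NCYCLE
** is later answered by summing per-opcode cycle counters over exactly
** those ranges. The accounting step as a standalone sketch (invented
** names; a zero start address marks an unused slot, as above):
**
**      typedef struct { long long nCycle; } MiniOp;
**      static long long rangeCycles(const MiniOp *aOp,
**                                   const int *aRange, int nPair){
**        long long nTotal = 0;
**        int i, a;
**        for(i=0; i<nPair*2; i+=2){
**          if( aRange[i]==0 ) break;       /* unused slot: stop */
**          for(a=aRange[i]; a<=aRange[i+1]; a++) nTotal += aOp[a].nCycle;
**        }
**        return nTotal;
**      }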
*/ SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe *p, int addr, u8 iNewOpcode){ + assert( addr>=0 ); sqlite3VdbeGetOp(p,addr)->opcode = iNewOpcode; } SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe *p, int addr, int val){ + assert( addr>=0 ); sqlite3VdbeGetOp(p,addr)->p1 = val; } SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){ + assert( addr>=0 || p->db->mallocFailed ); sqlite3VdbeGetOp(p,addr)->p2 = val; } SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, int addr, int val){ + assert( addr>=0 ); sqlite3VdbeGetOp(p,addr)->p3 = val; } SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ @@ -82217,6 +86081,18 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){ if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5; } +/* +** If the previous opcode is an OP_Column that delivers results +** into register iDest, then add the OPFLAG_TYPEOFARG flag to that +** opcode. +*/ +SQLITE_PRIVATE void sqlite3VdbeTypeofColumn(Vdbe *p, int iDest){ + VdbeOp *pOp = sqlite3VdbeGetLastOp(p); + if( pOp->p3==iDest && pOp->opcode==OP_Column ){ + pOp->p5 |= OPFLAG_TYPEOFARG; + } +} + /* ** Change the P2 operand of instruction addr so that it points to ** the address of the next instruction to be coded. @@ -82245,7 +86121,7 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){ || p->aOp[addr].opcode==OP_FkIfZero ); assert( p->aOp[addr].p4type==0 ); #ifdef SQLITE_VDBE_COVERAGE - sqlite3VdbeGetOp(p,-1)->iSrcLine = 0; /* Erase VdbeCoverage() macros */ + sqlite3VdbeGetLastOp(p)->iSrcLine = 0; /* Erase VdbeCoverage() macros */ #endif p->nOp--; }else{ @@ -82256,11 +86132,12 @@ SQLITE_PRIVATE void sqlite3VdbeJumpHereOrPopInst(Vdbe *p, int addr){ /* ** If the input FuncDef structure is ephemeral, then free it. If -** the FuncDef is not ephermal, then do nothing. +** the FuncDef is not ephemeral, then do nothing. 
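**
** sqlite3VdbeTypeofColumn() above is a peephole applied to the most
** recently emitted instruction: when the value loaded by an OP_Column is
** only ever inspected for its datatype, the column opcode is flagged so
** it can avoid materializing the full value. The general shape of such a
** last-op peephole, as a sketch with invented names:
**
**      #define MY_OP_COLUMN     1
**      #define MY_FLAG_TYPEONLY 0x80
**      typedef struct { int opcode, p3; unsigned p5; } MiniOp;
**      typedef struct { MiniOp *aOp; int nOp; } Gen;
**      static void hintTypeofColumn(Gen *g, int iDest){
**        MiniOp *pOp = &g->aOp[g->nOp-1];   /* last emitted instruction */
**        if( pOp->opcode==MY_OP_COLUMN && pOp->p3==iDest ){
**          pOp->p5 |= MY_FLAG_TYPEONLY;     /* type alone is needed */
**        }
**      }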
*/ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ + assert( db!=0 ); if( (pDef->funcFlags & SQLITE_FUNC_EPHEM)!=0 ){ - sqlite3DbFreeNN(db, pDef); + sqlite3DbNNFreeNN(db, pDef); } } @@ -82269,11 +86146,12 @@ static void freeEphemeralFunction(sqlite3 *db, FuncDef *pDef){ */ static SQLITE_NOINLINE void freeP4Mem(sqlite3 *db, Mem *p){ if( p->szMalloc ) sqlite3DbFree(db, p->zMalloc); - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } static SQLITE_NOINLINE void freeP4FuncCtx(sqlite3 *db, sqlite3_context *p){ + assert( db!=0 ); freeEphemeralFunction(db, p->pFunc); - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } static void freeP4(sqlite3 *db, int p4type, void *p4){ assert( db ); @@ -82286,7 +86164,7 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ case P4_INT64: case P4_DYNAMIC: case P4_INTARRAY: { - sqlite3DbFree(db, p4); + if( p4 ) sqlite3DbNNFreeNN(db, p4); break; } case P4_KEYINFO: { @@ -82315,6 +86193,10 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ if( db->pnBytesFreed==0 ) sqlite3VtabUnlock((VTable *)p4); break; } + case P4_TABLEREF: { + if( db->pnBytesFreed==0 ) sqlite3DeleteTable(db, (Table*)p4); + break; + } } } @@ -82325,6 +86207,7 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){ */ static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){ assert( nOp>=0 ); + assert( db!=0 ); if( aOp ){ Op *pOp = &aOp[nOp-1]; while(1){ /* Exit via break */ @@ -82335,7 +86218,7 @@ static void vdbeFreeOpArray(sqlite3 *db, Op *aOp, int nOp){ if( pOp==aOp ) break; pOp--; } - sqlite3DbFreeNN(db, aOp); + sqlite3DbNNFreeNN(db, aOp); } } @@ -82417,7 +86300,6 @@ SQLITE_PRIVATE void sqlite3VdbeReleaseRegisters( } #endif /* SQLITE_DEBUG */ - /* ** Change the value of the P4 operand for a specific instruction. ** This routine is useful when a large program is loaded from a @@ -82442,7 +86324,7 @@ static void SQLITE_NOINLINE vdbeChangeP4Full( int n ){ if( pOp->p4type ){ - freeP4(p->db, pOp->p4type, pOp->p4.p); + assert( pOp->p4type > P4_FREE_IF_LE ); pOp->p4type = 0; pOp->p4.p = 0; } @@ -82504,7 +86386,7 @@ SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){ if( p->db->mallocFailed ){ freeP4(p->db, n, pP4); }else{ - assert( pP4!=0 ); + assert( pP4!=0 || n==P4_DYNAMIC ); assert( p->nOp>0 ); pOp = &p->aOp[p->nOp-1]; assert( pOp->p4type==P4_NOTUSED ); @@ -82566,13 +86448,13 @@ SQLITE_PRIVATE void sqlite3VdbeNoopComment(Vdbe *p, const char *zFormat, ...){ ** Set the value if the iSrcLine field for the previously coded instruction. */ SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe *v, int iLine){ - sqlite3VdbeGetOp(v,-1)->iSrcLine = iLine; + sqlite3VdbeGetLastOp(v)->iSrcLine = iLine; } #endif /* SQLITE_VDBE_COVERAGE */ /* -** Return the opcode for a given address. If the address is -1, then -** return the most recently inserted opcode. +** Return the opcode for a given address. The address must be non-negative. +** See sqlite3VdbeGetLastOp() to get the most recently added opcode. ** ** If a memory allocation error has occurred prior to the calling of this ** routine, then a pointer to a dummy VdbeOp will be returned. That opcode @@ -82588,9 +86470,6 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ ** zeros, which is correct. MSVC generates a warning, nevertheless. 
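/*
** The sqlite3DbNNFreeNN() calls adopted above encode a contract in the
** name: both the database handle and the pointer are Never Null, which
** the debug build checks with asserts and the release build exploits by
** dropping the null tests that the tolerant sqlite3DbFree() must keep.
** The convention in miniature (invented names; plain free() stands in
** for the allocator-specific release):
**
**      #include <assert.h>
**      #include <stdlib.h>
**      static void dbFree(void *db, void *p){      /* tolerant variant */
**        (void)db;
**        if( p ) free(p);
**      }
**      static void dbNNFreeNN(void *db, void *p){  /* both never NULL */
**        assert( db!=0 );
**        assert( p!=0 );
**        free(p);                                  /* no branch needed */
**      }
*/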
*/ static VdbeOp dummy; /* Ignore the MSVC warning about no initializer */ assert( p->eVdbeState==VDBE_INIT_STATE ); - if( addr<0 ){ - addr = p->nOp - 1; - } assert( (addr>=0 && addrnOp) || p->db->mallocFailed ); if( p->db->mallocFailed ){ return (VdbeOp*)&dummy; @@ -82599,6 +86478,12 @@ SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe *p, int addr){ } } +/* Return the most recently added opcode +*/ +SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetLastOp(Vdbe *p){ + return sqlite3VdbeGetOp(p, p->nOp - 1); +} + #if defined(SQLITE_ENABLE_EXPLAIN_COMMENTS) /* ** Return an integer value for one of the parameters to the opcode pOp @@ -83086,7 +86971,7 @@ static void releaseMemArray(Mem *p, int N){ sqlite3VdbeMemRelease(p); p->flags = MEM_Undefined; }else if( p->szMalloc ){ - sqlite3DbFreeNN(db, p->zMalloc); + sqlite3DbNNFreeNN(db, p->zMalloc); p->szMalloc = 0; p->flags = MEM_Undefined; } @@ -83300,7 +87185,6 @@ SQLITE_PRIVATE int sqlite3VdbeList( ** sqlite3_column_text16(), causing a translation to UTF-16 encoding. */ releaseMemArray(pMem, 8); - p->pResultSet = 0; if( p->rc==SQLITE_NOMEM ){ /* This happens if a malloc() inside a call to sqlite3_column_text() or @@ -83336,7 +87220,7 @@ SQLITE_PRIVATE int sqlite3VdbeList( sqlite3VdbeMemSetInt64(pMem+1, pOp->p2); sqlite3VdbeMemSetInt64(pMem+2, pOp->p3); sqlite3VdbeMemSetStr(pMem+3, zP4, -1, SQLITE_UTF8, sqlite3_free); - p->nResColumn = 4; + assert( p->nResColumn==4 ); }else{ sqlite3VdbeMemSetInt64(pMem+0, i); sqlite3VdbeMemSetStr(pMem+1, (char*)sqlite3OpcodeName(pOp->opcode), @@ -83355,9 +87239,9 @@ SQLITE_PRIVATE int sqlite3VdbeList( sqlite3VdbeMemSetNull(pMem+7); #endif sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free); - p->nResColumn = 8; + assert( p->nResColumn==8 ); } - p->pResultSet = pMem; + p->pResultRow = pMem; if( db->mallocFailed ){ p->rc = SQLITE_NOMEM; rc = SQLITE_ERROR; @@ -83468,7 +87352,7 @@ static void *allocSpace( ** running it. 
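**
** A note on the allocation strategy visible in sqlite3VdbeMakeReady()
** just below: allocSpace() is run twice. On the first pass it is handed
** no buffer, so every request fails and only x.nNeeded is accumulated;
** one allocation of exactly that size is then made, and the second pass
** carves all of the objects out of the single block. The carver as a
** standalone sketch (invented names):
**
**      typedef struct { char *pSpace; int nFree; int nNeeded; } Space;
**      static void *carve(Space *x, void *pKeep, int nByte){
**        if( pKeep ) return pKeep;     /* satisfied on an earlier pass */
**        nByte = (nByte+7)&~7;         /* preserve 8-byte alignment */
**        if( x->nFree>=nByte ){
**          void *p = x->pSpace;
**          x->pSpace += nByte;
**          x->nFree -= nByte;
**          return p;
**        }
**        x->nNeeded += nByte;          /* first pass: just tally */
**        return 0;
**      }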
*/ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) +#if defined(SQLITE_DEBUG) int i; #endif assert( p!=0 ); @@ -83497,8 +87381,8 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){ p->nFkConstraint = 0; #ifdef VDBE_PROFILE for(i=0; inOp; i++){ - p->aOp[i].cnt = 0; - p->aOp[i].cycles = 0; + p->aOp[i].nExec = 0; + p->aOp[i].nCycle = 0; } #endif } @@ -83569,26 +87453,9 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( resolveP2Values(p, &nArg); p->usesStmtJournal = (u8)(pParse->isMultiWrite && pParse->mayAbort); if( pParse->explain ){ - static const char * const azColName[] = { - "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", - "id", "parent", "notused", "detail" - }; - int iFirst, mx, i; if( nMem<10 ) nMem = 10; p->explain = pParse->explain; - if( pParse->explain==2 ){ - sqlite3VdbeSetNumCols(p, 4); - iFirst = 8; - mx = 12; - }else{ - sqlite3VdbeSetNumCols(p, 8); - iFirst = 0; - mx = 8; - } - for(i=iFirst; inResColumn = 12 - 4*p->explain; } p->expired = 0; @@ -83607,9 +87474,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->aVar = allocSpace(&x, 0, nVar*sizeof(Mem)); p->apArg = allocSpace(&x, 0, nArg*sizeof(Mem*)); p->apCsr = allocSpace(&x, 0, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - p->anExec = allocSpace(&x, 0, p->nOp*sizeof(i64)); -#endif if( x.nNeeded ){ x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded); x.nFree = x.nNeeded; @@ -83618,9 +87482,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem)); p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*)); p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64)); -#endif } } @@ -83635,9 +87496,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( p->nMem = nMem; initMemArray(p->aMem, nMem, db, MEM_Undefined); memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*)); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - memset(p->anExec, 0, p->nOp*sizeof(i64)); -#endif } sqlite3VdbeRewind(p); } @@ -83649,7 +87507,23 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady( SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){ if( pCx ) sqlite3VdbeFreeCursorNN(p,pCx); } +static SQLITE_NOINLINE void freeCursorWithCache(Vdbe *p, VdbeCursor *pCx){ + VdbeTxtBlbCache *pCache = pCx->pCache; + assert( pCx->colCache ); + pCx->colCache = 0; + pCx->pCache = 0; + if( pCache->pCValue ){ + sqlite3RCStrUnref(pCache->pCValue); + pCache->pCValue = 0; + } + sqlite3DbFree(p->db, pCache); + sqlite3VdbeFreeCursorNN(p, pCx); +} SQLITE_PRIVATE void sqlite3VdbeFreeCursorNN(Vdbe *p, VdbeCursor *pCx){ + if( pCx->colCache ){ + freeCursorWithCache(p, pCx); + return; + } switch( pCx->eCurType ){ case CURTYPE_SORTER: { sqlite3VdbeSorterClose(p->db, pCx); @@ -83695,9 +87569,6 @@ static void closeCursorsInFrame(Vdbe *p){ SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){ Vdbe *v = pFrame->v; closeCursorsInFrame(v); -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - v->anExec = pFrame->anExec; -#endif v->aOp = pFrame->aOp; v->nOp = pFrame->nOp; v->aMem = pFrame->aMem; @@ -83753,12 +87624,12 @@ SQLITE_PRIVATE void sqlite3VdbeSetNumCols(Vdbe *p, int nResColumn){ int n; sqlite3 *db = p->db; - if( p->nResColumn ){ - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); + if( p->nResAlloc ){ + releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N); sqlite3DbFree(db, p->aColName); } n = nResColumn*COLNAME_N; - p->nResColumn = (u16)nResColumn; + p->nResColumn = 
p->nResAlloc = (u16)nResColumn; p->aColName = (Mem*)sqlite3DbMallocRawNN(db, sizeof(Mem)*n ); if( p->aColName==0 ) return; initMemArray(p->aColName, n, db, MEM_Null); @@ -83783,14 +87654,14 @@ SQLITE_PRIVATE int sqlite3VdbeSetColName( ){ int rc; Mem *pColName; - assert( idxnResColumn ); + assert( idxnResAlloc ); assert( vardb->mallocFailed ){ assert( !zName || xDel!=SQLITE_DYNAMIC ); return SQLITE_NOMEM_BKPT; } assert( p->aColName!=0 ); - pColName = &(p->aColName[idx+var*p->nResColumn]); + pColName = &(p->aColName[idx+var*p->nResAlloc]); rc = sqlite3VdbeMemSetStr(pColName, zName, -1, SQLITE_UTF8, xDel); assert( rc!=0 || !zName || (pColName->flags&MEM_Term)!=0 ); return rc; @@ -84078,7 +87949,7 @@ static void checkActiveVdbeCnt(sqlite3 *db){ if( p->readOnly==0 ) nWrite++; if( p->bIsReader ) nRead++; } - p = p->pNext; + p = p->pVNext; } assert( cnt==db->nVdbeActive ); assert( nWrite==db->nVdbeWrite ); @@ -84303,6 +88174,7 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ sqlite3VdbeLeave(p); return SQLITE_BUSY; }else if( rc!=SQLITE_OK ){ + sqlite3SystemError(db, rc); p->rc = rc; sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; @@ -84312,6 +88184,8 @@ SQLITE_PRIVATE int sqlite3VdbeHalt(Vdbe *p){ db->flags &= ~(u64)SQLITE_DeferFKs; sqlite3CommitInternalChanges(db); } + }else if( p->rc==SQLITE_SCHEMA && db->nVdbeActive>1 ){ + p->nChange = 0; }else{ sqlite3RollbackAll(db, SQLITE_OK); p->nChange = 0; @@ -84501,7 +88375,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ sqlite3DbFree(db, p->zErrMsg); p->zErrMsg = 0; } - p->pResultSet = 0; + p->pResultRow = 0; #ifdef SQLITE_DEBUG p->nWrite = 0; #endif @@ -84529,10 +88403,12 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){ } for(i=0; inOp; i++){ char zHdr[100]; + i64 cnt = p->aOp[i].nExec; + i64 cycles = p->aOp[i].nCycle; sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ", - p->aOp[i].cnt, - p->aOp[i].cycles, - p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0 + cnt, + cycles, + cnt>0 ? 
cycles/cnt : 0 ); fprintf(out, "%s", zHdr); sqlite3VdbePrintOp(out, i, &p->aOp[i]); @@ -84607,10 +88483,11 @@ SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp, */ static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ SubProgram *pSub, *pNext; + assert( db!=0 ); assert( p->db==0 || p->db==db ); if( p->aColName ){ - releaseMemArray(p->aColName, p->nResColumn*COLNAME_N); - sqlite3DbFreeNN(db, p->aColName); + releaseMemArray(p->aColName, p->nResAlloc*COLNAME_N); + sqlite3DbNNFreeNN(db, p->aColName); } for(pSub=p->pProgram; pSub; pSub=pNext){ pNext = pSub->pNext; @@ -84619,17 +88496,17 @@ static void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){ } if( p->eVdbeState!=VDBE_INIT_STATE ){ releaseMemArray(p->aVar, p->nVar); - if( p->pVList ) sqlite3DbFreeNN(db, p->pVList); - if( p->pFree ) sqlite3DbFreeNN(db, p->pFree); + if( p->pVList ) sqlite3DbNNFreeNN(db, p->pVList); + if( p->pFree ) sqlite3DbNNFreeNN(db, p->pFree); } vdbeFreeOpArray(db, p->aOp, p->nOp); - sqlite3DbFree(db, p->zSql); + if( p->zSql ) sqlite3DbNNFreeNN(db, p->zSql); #ifdef SQLITE_ENABLE_NORMALIZE sqlite3DbFree(db, p->zNormSql); { - DblquoteStr *pThis, *pNext; - for(pThis=p->pDblStr; pThis; pThis=pNext){ - pNext = pThis->pNextStr; + DblquoteStr *pThis, *pNxt; + for(pThis=p->pDblStr; pThis; pThis=pNxt){ + pNxt = pThis->pNextStr; sqlite3DbFree(db, pThis); } } @@ -84653,20 +88530,17 @@ SQLITE_PRIVATE void sqlite3VdbeDelete(Vdbe *p){ assert( p!=0 ); db = p->db; + assert( db!=0 ); assert( sqlite3_mutex_held(db->mutex) ); sqlite3VdbeClearObject(db, p); if( db->pnBytesFreed==0 ){ - if( p->pPrev ){ - p->pPrev->pNext = p->pNext; - }else{ - assert( db->pVdbe==p ); - db->pVdbe = p->pNext; - } - if( p->pNext ){ - p->pNext->pPrev = p->pPrev; + assert( p->ppVPrev!=0 ); + *p->ppVPrev = p->pVNext; + if( p->pVNext ){ + p->pVNext->ppVPrev = p->ppVPrev; } } - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } /* @@ -84979,6 +88853,23 @@ static void serialGet( pMem->flags = IsNaN(x) ? MEM_Null : MEM_Real; } } +static int serialGet7( + const unsigned char *buf, /* Buffer to deserialize from */ + Mem *pMem /* Memory cell to write value into */ +){ + u64 x = FOUR_BYTE_UINT(buf); + u32 y = FOUR_BYTE_UINT(buf+4); + x = (x<<32) + y; + assert( sizeof(x)==8 && sizeof(pMem->u.r)==8 ); + swapMixedEndianFloat(x); + memcpy(&pMem->u.r, &x, sizeof(x)); + if( IsNaN(x) ){ + pMem->flags = MEM_Null; + return 1; + } + pMem->flags = MEM_Real; + return 0; +} SQLITE_PRIVATE void sqlite3VdbeSerialGet( const unsigned char *buf, /* Buffer to deserialize from */ u32 serial_type, /* Serial type to deserialize */ @@ -85212,6 +89103,15 @@ static int vdbeRecordCompareDebug( if( d1+(u64)serial_type1+2>(u64)nKey1 && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)>(u64)nKey1 ){ + if( serial_type1>=1 + && serial_type1<=7 + && d1+(u64)sqlite3VdbeSerialTypeLen(serial_type1)<=(u64)nKey1+8 + && CORRUPT_DB + ){ + return 1; /* corrupt record not detected by + ** sqlite3VdbeRecordCompareWithSkip(). Return true + ** to avoid firing the assert() */ + } break; } @@ -85380,20 +89280,33 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3BlobCompare(const Mem *pB1, const Mem return n1 - n2; } +/* The following two functions are used only within testcase() to prove +** test coverage. These functions do no exist for production builds. +** We must use separate SQLITE_NOINLINE functions here, since otherwise +** optimizer code movement causes gcov to become very confused. 
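**
** serialGet7() above decodes record serial type 7: eight big-endian
** bytes holding an IEEE-754 double, with a stored NaN surfacing as SQL
** NULL rather than as a NaN value. A standalone decoder under the same
** assumption (native doubles are IEEE-754; invented names):
**
**      #include <stdint.h>
**      #include <string.h>
**      #include <math.h>
**      static int getFloat7(const unsigned char *buf, double *pR){
**        uint64_t x = 0;
**        int i;
**        for(i=0; i<8; i++) x = (x<<8) | buf[i];   /* big-endian load */
**        memcpy(pR, &x, 8);
**        return isnan(*pR);  /* nonzero: caller should yield SQL NULL */
**      }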
+*/ +#if defined(SQLITE_COVERAGE_TEST) || defined(SQLITE_DEBUG) +static int SQLITE_NOINLINE doubleLt(double a, double b){ return a8 ){ + if( sqlite3IsNaN(r) ){ + /* SQLite considers NaN to be a NULL. And all integer values are greater + ** than NULL */ + return 1; + } + if( sqlite3Config.bUseLongDouble ){ LONGDOUBLE_TYPE x = (LONGDOUBLE_TYPE)i; testcase( xr ); testcase( x==r ); - if( xr ) return +1; /*NO_TEST*/ /* work around bugs in gcov */ - return 0; /*NO_TEST*/ /* work around bugs in gcov */ + return (xr); }else{ i64 y; double s; @@ -85403,9 +89316,10 @@ SQLITE_PRIVATE int sqlite3IntFloatCompare(i64 i, double r){ if( iy ) return +1; s = (double)i; - if( sr ) return +1; - return 0; + testcase( doubleLt(s,r) ); + testcase( doubleLt(r,s) ); + testcase( doubleEq(r,s) ); + return (sr); } } @@ -85621,7 +89535,7 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( assert( pPKey2->pKeyInfo->aSortFlags!=0 ); assert( pPKey2->pKeyInfo->nKeyField>0 ); assert( idx1<=szHdr1 || CORRUPT_DB ); - do{ + while( 1 /*exit-by-break*/ ){ u32 serial_type; /* RHS is an integer */ @@ -85631,11 +89545,11 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( serial_type = aKey1[idx1]; testcase( serial_type==12 ); if( serial_type>=10 ){ - rc = +1; + rc = serial_type==10 ? -1 : +1; }else if( serial_type==0 ){ rc = -1; }else if( serial_type==7 ){ - sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); + serialGet7(&aKey1[d1], &mem1); rc = -sqlite3IntFloatCompare(pRhs->u.i, mem1.u.r); }else{ i64 lhs = vdbeRecordDecodeInt(serial_type, &aKey1[d1]); @@ -85655,19 +89569,23 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( /* Serial types 12 or greater are strings and blobs (greater than ** numbers). Types 10 and 11 are currently "reserved for future ** use", so it doesn't really matter what the results of comparing - ** them to numberic values are. */ - rc = +1; + ** them to numeric values are. */ + rc = serial_type==10 ? -1 : +1; }else if( serial_type==0 ){ rc = -1; }else{ - sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); if( serial_type==7 ){ - if( mem1.u.ru.r ){ + if( serialGet7(&aKey1[d1], &mem1) ){ + rc = -1; /* mem1 is a NaN */ + }else if( mem1.u.ru.r ){ rc = -1; }else if( mem1.u.r>pRhs->u.r ){ rc = +1; + }else{ + assert( rc==0 ); } }else{ + sqlite3VdbeSerialGet(&aKey1[d1], serial_type, &mem1); rc = sqlite3IntFloatCompare(mem1.u.i, pRhs->u.r); } } @@ -85737,7 +89655,14 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( /* RHS is null */ else{ serial_type = aKey1[idx1]; - rc = (serial_type!=0); + if( serial_type==0 + || serial_type==10 + || (serial_type==7 && serialGet7(&aKey1[d1], &mem1)!=0) + ){ + assert( rc==0 ); + }else{ + rc = 1; + } } if( rc!=0 ){ @@ -85759,8 +89684,13 @@ SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip( if( i==pPKey2->nField ) break; pRhs++; d1 += sqlite3VdbeSerialTypeLen(serial_type); + if( d1>(unsigned)nKey1 ) break; idx1 += sqlite3VarintLen(serial_type); - }while( idx1<(unsigned)szHdr1 && d1<=(unsigned)nKey1 ); + if( idx1>=(unsigned)szHdr1 ){ + pPKey2->errCode = (u8)SQLITE_CORRUPT_BKPT; + return 0; /* Corrupt index */ + } + } /* No memory allocation is ever used on mem1. Prove this using ** the following assert(). 
If the assert() fails, it indicates a @@ -86161,7 +90091,7 @@ SQLITE_PRIVATE void sqlite3VdbeCountChanges(Vdbe *v){ */ SQLITE_PRIVATE void sqlite3ExpirePreparedStatements(sqlite3 *db, int iCode){ Vdbe *p; - for(p = db->pVdbe; p; p=p->pNext){ + for(p = db->pVdbe; p; p=p->pVNext){ p->expired = iCode+1; } } @@ -86254,6 +90184,20 @@ SQLITE_PRIVATE int sqlite3NotPureFunc(sqlite3_context *pCtx){ return 1; } +#if defined(SQLITE_ENABLE_CURSOR_HINTS) && defined(SQLITE_DEBUG) +/* +** This Walker callback is used to help verify that calls to +** sqlite3BtreeCursorHint() with opcode BTREE_HINT_RANGE have +** byte-code register values correctly initialized. +*/ +SQLITE_PRIVATE int sqlite3CursorRangeHintExprCheck(Walker *pWalker, Expr *pExpr){ + if( pExpr->op==TK_REGISTER ){ + assert( (pWalker->u.aMem[pExpr->iTable].flags & MEM_Undefined)==0 ); + } + return WRC_Continue; +} +#endif /* SQLITE_ENABLE_CURSOR_HINTS && SQLITE_DEBUG */ + #ifndef SQLITE_OMIT_VIRTUALTABLE /* ** Transfer error message text from an sqlite3_vtab.zErrMsg (text stored @@ -86282,13 +90226,14 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){ ** the vdbeUnpackRecord() function found in vdbeapi.c. */ static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){ + assert( db!=0 ); if( p ){ int i; for(i=0; iaMem[i]; if( pMem->zMalloc ) sqlite3VdbeMemReleaseMalloc(pMem); } - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -86315,6 +90260,16 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( PreUpdate preupdate; const char *zTbl = pTab->zName; static const u8 fakeSortOrder = 0; +#ifdef SQLITE_DEBUG + int nRealCol; + if( pTab->tabFlags & TF_WithoutRowid ){ + nRealCol = sqlite3PrimaryKeyIndex(pTab)->nColumn; + }else if( pTab->tabFlags & TF_HasVirtual ){ + nRealCol = pTab->nNVCol; + }else{ + nRealCol = pTab->nCol; + } +#endif assert( db->pPreUpdate==0 ); memset(&preupdate, 0, sizeof(PreUpdate)); @@ -86331,8 +90286,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( assert( pCsr!=0 ); assert( pCsr->eCurType==CURTYPE_BTREE ); - assert( pCsr->nField==pTab->nCol - || (pCsr->nField==pTab->nCol+1 && op==SQLITE_DELETE && iReg==-1) + assert( pCsr->nField==nRealCol + || (pCsr->nField==nRealCol+1 && op==SQLITE_DELETE && iReg==-1) ); preupdate.v = v; @@ -86359,7 +90314,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( for(i=0; inField; i++){ sqlite3VdbeMemRelease(&preupdate.aNew[i]); } - sqlite3DbFreeNN(db, preupdate.aNew); + sqlite3DbNNFreeNN(db, preupdate.aNew); } } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -86383,6 +90338,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook( */ /* #include "sqliteInt.h" */ /* #include "vdbeInt.h" */ +/* #include "opcodes.h" */ #ifndef SQLITE_OMIT_DEPRECATED /* @@ -86476,7 +90432,9 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt){ if( vdbeSafety(v) ) return SQLITE_MISUSE_BKPT; sqlite3_mutex_enter(db->mutex); checkProfileCallback(db, v); - rc = sqlite3VdbeFinalize(v); + assert( v->eVdbeState>=VDBE_READY_STATE ); + rc = sqlite3VdbeReset(v); + sqlite3VdbeDelete(v); rc = sqlite3ApiExit(db, rc); sqlite3LeaveMutexAndCloseZombie(db); } @@ -86517,7 +90475,15 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt *pStmt){ int rc = SQLITE_OK; Vdbe *p = (Vdbe*)pStmt; #if SQLITE_THREADSAFE - sqlite3_mutex *mutex = ((Vdbe*)pStmt)->db->mutex; + sqlite3_mutex *mutex; +#endif +#ifdef SQLITE_ENABLE_API_ARMOR + if( pStmt==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif +#if SQLITE_THREADSAFE + mutex = p->db->mutex; #endif 
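/*
** The pStmt==0 guard above is one instance of a pattern that repeats
** through the rest of this file: when SQLITE_ENABLE_API_ARMOR is
** defined, every public entry point validates its pointer arguments and
** fails with SQLITE_MISUSE instead of dereferencing NULL; unarmored
** builds skip the test entirely. Schematically (the function name here
** is invented):
**
**      SQLITE_API int sqlite3_some_api(sqlite3_stmt *pStmt){
**      #ifdef SQLITE_ENABLE_API_ARMOR
**        if( pStmt==0 ) return SQLITE_MISUSE_BKPT;
**      #endif
**        ...normal processing...
**      }
*/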
sqlite3_mutex_enter(mutex); for(i=0; inVar; i++){ @@ -86636,7 +90602,7 @@ SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){ SQLITE_NULL, /* 0x1f (not possible) */ SQLITE_FLOAT, /* 0x20 INTREAL */ SQLITE_NULL, /* 0x21 (not possible) */ - SQLITE_TEXT, /* 0x22 INTREAL + TEXT */ + SQLITE_FLOAT, /* 0x22 INTREAL + TEXT */ SQLITE_NULL, /* 0x23 (not possible) */ SQLITE_FLOAT, /* 0x24 (not possible) */ SQLITE_NULL, /* 0x25 (not possible) */ @@ -86684,6 +90650,9 @@ SQLITE_API int sqlite3_value_type(sqlite3_value* pVal){ #endif return aType[pVal->flags&MEM_AffMask]; } +SQLITE_API int sqlite3_value_encoding(sqlite3_value *pVal){ + return pVal->enc; +} /* Return true if a parameter to xUpdate represents an unchanged column */ SQLITE_API int sqlite3_value_nochange(sqlite3_value *pVal){ @@ -86737,7 +90706,7 @@ SQLITE_API void sqlite3_value_free(sqlite3_value *pOld){ ** is too big or if an OOM occurs. ** ** The invokeValueDestructor(P,X) routine invokes destructor function X() -** on value P is not going to be used and need to be destroyed. +** on value P if P is not going to be used and need to be destroyed. */ static void setResultStrOrError( sqlite3_context *pCtx, /* Function context */ @@ -86767,7 +90736,7 @@ static void setResultStrOrError( static int invokeValueDestructor( const void *p, /* Value to destroy */ void (*xDel)(void*), /* The destructor */ - sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if no NULL */ + sqlite3_context *pCtx /* Set a SQLITE_TOOBIG error if not NULL */ ){ assert( xDel!=SQLITE_DYNAMIC ); if( xDel==0 ){ @@ -86777,7 +90746,14 @@ static int invokeValueDestructor( }else{ xDel((void*)p); } +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx!=0 ){ + sqlite3_result_error_toobig(pCtx); + } +#else + assert( pCtx!=0 ); sqlite3_result_error_toobig(pCtx); +#endif return SQLITE_TOOBIG; } SQLITE_API void sqlite3_result_blob( @@ -86786,6 +90762,12 @@ SQLITE_API void sqlite3_result_blob( int n, void (*xDel)(void *) ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 || n<0 ){ + invokeValueDestructor(z, xDel, pCtx); + return; + } +#endif assert( n>=0 ); assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, 0, xDel); @@ -86796,8 +90778,14 @@ SQLITE_API void sqlite3_result_blob64( sqlite3_uint64 n, void (*xDel)(void *) ){ - assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); assert( xDel!=SQLITE_DYNAMIC ); +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif + assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); }else{ @@ -86805,30 +90793,48 @@ SQLITE_API void sqlite3_result_blob64( } } SQLITE_API void sqlite3_result_double(sqlite3_context *pCtx, double rVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetDouble(pCtx->pOut, rVal); } SQLITE_API void sqlite3_result_error(sqlite3_context *pCtx, const char *z, int n){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF8, SQLITE_TRANSIENT); } #ifndef SQLITE_OMIT_UTF16 SQLITE_API void sqlite3_result_error16(sqlite3_context *pCtx, const void *z, int n){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_ERROR; sqlite3VdbeMemSetStr(pCtx->pOut, z, n, SQLITE_UTF16NATIVE, SQLITE_TRANSIENT); 
} #endif SQLITE_API void sqlite3_result_int(sqlite3_context *pCtx, int iVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, (i64)iVal); } SQLITE_API void sqlite3_result_int64(sqlite3_context *pCtx, i64 iVal){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetInt64(pCtx->pOut, iVal); } SQLITE_API void sqlite3_result_null(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); } @@ -86838,14 +90844,37 @@ SQLITE_API void sqlite3_result_pointer( const char *zPType, void (*xDestructor)(void*) ){ - Mem *pOut = pCtx->pOut; + Mem *pOut; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(pPtr, xDestructor, 0); + return; + } +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); sqlite3VdbeMemRelease(pOut); pOut->flags = MEM_Null; sqlite3VdbeMemSetPointer(pOut, pPtr, zPType, xDestructor); } SQLITE_API void sqlite3_result_subtype(sqlite3_context *pCtx, unsigned int eSubtype){ - Mem *pOut = pCtx->pOut; + Mem *pOut; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif +#if defined(SQLITE_STRICT_SUBTYPE) && SQLITE_STRICT_SUBTYPE+0!=0 + if( pCtx->pFunc!=0 + && (pCtx->pFunc->funcFlags & SQLITE_RESULT_SUBTYPE)==0 + ){ + char zErr[200]; + sqlite3_snprintf(sizeof(zErr), zErr, + "misuse of sqlite3_result_subtype() by %s()", + pCtx->pFunc->zName); + sqlite3_result_error(pCtx, zErr, -1); + return; + } +#endif /* SQLITE_STRICT_SUBTYPE */ + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); pOut->eSubtype = eSubtype & 0xff; pOut->flags |= MEM_Subtype; @@ -86856,6 +90885,12 @@ SQLITE_API void sqlite3_result_text( int n, void (*xDel)(void *) ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); setResultStrOrError(pCtx, z, n, SQLITE_UTF8, xDel); } @@ -86866,13 +90901,23 @@ SQLITE_API void sqlite3_result_text64( void (*xDel)(void *), unsigned char enc ){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ){ + invokeValueDestructor(z, xDel, 0); + return; + } +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); assert( xDel!=SQLITE_DYNAMIC ); - if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + if( enc!=SQLITE_UTF8 ){ + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + n &= ~(u64)1; + } if( n>0x7fffffff ){ (void)invokeValueDestructor(z, xDel, pCtx); }else{ setResultStrOrError(pCtx, z, (int)n, enc, xDel); + sqlite3VdbeMemZeroTerminateIfAble(pCtx->pOut); } } #ifndef SQLITE_OMIT_UTF16 @@ -86883,7 +90928,7 @@ SQLITE_API void sqlite3_result_text16( void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16NATIVE, xDel); } SQLITE_API void sqlite3_result_text16be( sqlite3_context *pCtx, @@ -86892,7 +90937,7 @@ SQLITE_API void sqlite3_result_text16be( void (*xDel)(void *) ){ assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16BE, xDel); } SQLITE_API void sqlite3_result_text16le( sqlite3_context *pCtx, @@ -86901,11 +90946,20 @@ SQLITE_API void sqlite3_result_text16le( void (*xDel)(void *) ){ 
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); - setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel); + setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16LE, xDel); } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){ - Mem *pOut = pCtx->pOut; + Mem *pOut; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; + if( pValue==0 ){ + sqlite3_result_null(pCtx); + return; + } +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemCopy(pOut, pValue); sqlite3VdbeChangeEncoding(pOut, pCtx->enc); @@ -86917,7 +90971,12 @@ SQLITE_API void sqlite3_result_zeroblob(sqlite3_context *pCtx, int n){ sqlite3_result_zeroblob64(pCtx, n>0 ? n : 0); } SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ - Mem *pOut = pCtx->pOut; + Mem *pOut; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return SQLITE_MISUSE_BKPT; +#endif + pOut = pCtx->pOut; assert( sqlite3_mutex_held(pOut->db->mutex) ); if( n>(u64)pOut->db->aLimit[SQLITE_LIMIT_LENGTH] ){ sqlite3_result_error_toobig(pCtx); @@ -86931,6 +90990,9 @@ SQLITE_API int sqlite3_result_zeroblob64(sqlite3_context *pCtx, u64 n){ #endif } SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif pCtx->isError = errCode ? errCode : -1; #ifdef SQLITE_DEBUG if( pCtx->pVdbe ) pCtx->pVdbe->rcApp = errCode; @@ -86943,6 +91005,9 @@ SQLITE_API void sqlite3_result_error_code(sqlite3_context *pCtx, int errCode){ /* Force an SQLITE_TOOBIG error. */ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); pCtx->isError = SQLITE_TOOBIG; sqlite3VdbeMemSetStr(pCtx->pOut, "string or blob too big", -1, @@ -86951,6 +91016,9 @@ SQLITE_API void sqlite3_result_error_toobig(sqlite3_context *pCtx){ /* An SQLITE_NOMEM error. */ SQLITE_API void sqlite3_result_error_nomem(sqlite3_context *pCtx){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); sqlite3VdbeMemSetNull(pCtx->pOut); pCtx->isError = SQLITE_NOMEM_BKPT; @@ -87112,7 +91180,7 @@ static int sqlite3Step(Vdbe *p){ /* If the statement completed successfully, invoke the profile callback */ checkProfileCallback(db, p); #endif - + p->pResultRow = 0; if( rc==SQLITE_DONE && db->autoCommit ){ assert( p->rc==SQLITE_OK ); p->rc = doWalCallbacks(db); @@ -87203,6 +91271,9 @@ SQLITE_API int sqlite3_step(sqlite3_stmt *pStmt){ ** pointer to it. */ SQLITE_API void *sqlite3_user_data(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#endif assert( p && p->pFunc ); return p->pFunc->pUserData; } @@ -87218,7 +91289,11 @@ SQLITE_API void *sqlite3_user_data(sqlite3_context *p){ ** application defined function. */ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#else assert( p && p->pOut ); +#endif return p->pOut->db; } @@ -87237,10 +91312,25 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context *p){ ** value, as a signal to the xUpdate routine that the column is unchanged. */ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( p==0 ) return 0; +#else assert( p ); +#endif return sqlite3_value_nochange(p->pOut); } +/* +** The destructor function for a ValueList object. 
This needs to be +** a separate function, unknowable to the application, to ensure that +** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not +** preceded by activation of IN processing via sqlite3_vtab_int() do not +** try to access a fake ValueList object inserted by a hostile extension. +*/ +SQLITE_PRIVATE void sqlite3VdbeValueListFree(void *pToDelete){ + sqlite3_free(pToDelete); +} + /* ** Implementation of sqlite3_vtab_in_first() (if bNext==0) and ** sqlite3_vtab_in_next() (if bNext!=0). @@ -87254,9 +91344,16 @@ static int valueFromValueList( ValueList *pRhs; *ppOut = 0; - if( pVal==0 ) return SQLITE_MISUSE; - pRhs = (ValueList*)sqlite3_value_pointer(pVal, "ValueList"); - if( pRhs==0 ) return SQLITE_MISUSE; + if( pVal==0 ) return SQLITE_MISUSE_BKPT; + if( (pVal->flags & MEM_Dyn)==0 || pVal->xDel!=sqlite3VdbeValueListFree ){ + return SQLITE_ERROR; + }else{ + assert( (pVal->flags&(MEM_TypeMask|MEM_Term|MEM_Subtype)) == + (MEM_Null|MEM_Term|MEM_Subtype) ); + assert( pVal->eSubtype=='p' ); + assert( pVal->u.zPType!=0 && strcmp(pVal->u.zPType,"ValueList")==0 ); + pRhs = (ValueList*)pVal->z; + } if( bNext ){ rc = sqlite3BtreeNext(pRhs->pCsr, 0); }else{ @@ -87378,6 +91475,9 @@ SQLITE_API void *sqlite3_aggregate_context(sqlite3_context *p, int nByte){ SQLITE_API void *sqlite3_get_auxdata(sqlite3_context *pCtx, int iArg){ AuxData *pAuxData; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return 0; +#endif assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); #if SQLITE_ENABLE_STAT4 if( pCtx->pVdbe==0 ) return 0; @@ -87410,8 +91510,12 @@ SQLITE_API void sqlite3_set_auxdata( void (*xDelete)(void*) ){ AuxData *pAuxData; - Vdbe *pVdbe = pCtx->pVdbe; + Vdbe *pVdbe; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pCtx==0 ) return; +#endif + pVdbe= pCtx->pVdbe; assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) ); #ifdef SQLITE_ENABLE_STAT4 if( pVdbe==0 ) goto failed; @@ -87467,7 +91571,8 @@ SQLITE_API int sqlite3_aggregate_count(sqlite3_context *p){ */ SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; - return pVm ? pVm->nResColumn : 0; + if( pVm==0 ) return 0; + return pVm->nResColumn; } /* @@ -87476,7 +91581,7 @@ SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){ */ SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){ Vdbe *pVm = (Vdbe *)pStmt; - if( pVm==0 || pVm->pResultSet==0 ) return 0; + if( pVm==0 || pVm->pResultRow==0 ) return 0; return pVm->nResColumn; } @@ -87531,8 +91636,8 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){ if( pVm==0 ) return (Mem*)columnNullValue(); assert( pVm->db ); sqlite3_mutex_enter(pVm->db->mutex); - if( pVm->pResultSet!=0 && inResColumn && i>=0 ){ - pOut = &pVm->pResultSet[i]; + if( pVm->pResultRow!=0 && inResColumn && i>=0 ){ + pOut = &pVm->pResultRow[i]; }else{ sqlite3Error(pVm->db, SQLITE_RANGE); pOut = (Mem*)columnNullValue(); @@ -87556,7 +91661,7 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){ ** sqlite3_column_real() ** sqlite3_column_bytes() ** sqlite3_column_bytes16() -** sqiite3_column_blob() +** sqlite3_column_blob() */ static void columnMallocFailure(sqlite3_stmt *pStmt) { @@ -87640,6 +91745,32 @@ SQLITE_API int sqlite3_column_type(sqlite3_stmt *pStmt, int i){ return iType; } +/* +** Column names appropriate for EXPLAIN or EXPLAIN QUERY PLAN. 
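**
** The name tables just below keep all of the UTF-16 column names in one
** flat array of code units plus a small byte-offset index, rather than
** an array of pointers: the data is a single shared read-only blob with
** no relocations, and a lookup is one index into the offset table. The
** same layout in miniature (invented names):
**
**      static const unsigned short azName16[] = {
**        /* 0 */ 'i','d',0,
**        /* 3 */ 'd','e','t','a','i','l',0
**      };
**      static const unsigned char aiNameOfst[] = { 0, 3 };
**      #define NAME16(i) (&azName16[aiNameOfst[i]])  /* 0="id", 1="detail" */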
+*/ +static const char * const azExplainColNames8[] = { + "addr", "opcode", "p1", "p2", "p3", "p4", "p5", "comment", /* EXPLAIN */ + "id", "parent", "notused", "detail" /* EQP */ +}; +static const u16 azExplainColNames16data[] = { + /* 0 */ 'a', 'd', 'd', 'r', 0, + /* 5 */ 'o', 'p', 'c', 'o', 'd', 'e', 0, + /* 12 */ 'p', '1', 0, + /* 15 */ 'p', '2', 0, + /* 18 */ 'p', '3', 0, + /* 21 */ 'p', '4', 0, + /* 24 */ 'p', '5', 0, + /* 27 */ 'c', 'o', 'm', 'm', 'e', 'n', 't', 0, + /* 35 */ 'i', 'd', 0, + /* 38 */ 'p', 'a', 'r', 'e', 'n', 't', 0, + /* 45 */ 'n', 'o', 't', 'u', 's', 'e', 'd', 0, + /* 53 */ 'd', 'e', 't', 'a', 'i', 'l', 0 +}; +static const u8 iExplainColNames16[] = { + 0, 5, 12, 15, 18, 21, 24, 27, + 35, 38, 45, 53 +}; + /* ** Convert the N-th element of pStmt->pColName[] into a string using ** xFunc() then return that string. If N is out of range, return 0. @@ -87672,15 +91803,29 @@ static const void *columnName( return 0; } #endif + if( N<0 ) return 0; ret = 0; p = (Vdbe *)pStmt; db = p->db; assert( db!=0 ); - n = sqlite3_column_count(pStmt); - if( N=0 ){ + sqlite3_mutex_enter(db->mutex); + + if( p->explain ){ + if( useType>0 ) goto columnName_end; + n = p->explain==1 ? 8 : 4; + if( N>=n ) goto columnName_end; + if( useUtf16 ){ + int i = iExplainColNames16[N + 8*p->explain - 8]; + ret = (void*)&azExplainColNames16data[i]; + }else{ + ret = (void*)azExplainColNames8[N + 8*p->explain - 8]; + } + goto columnName_end; + } + n = p->nResColumn; + if( NmallocFailed; N += useType*n; - sqlite3_mutex_enter(db->mutex); - assert( db->mallocFailed==0 ); #ifndef SQLITE_OMIT_UTF16 if( useUtf16 ){ ret = sqlite3_value_text16((sqlite3_value*)&p->aColName[N]); @@ -87692,12 +91837,14 @@ static const void *columnName( /* A malloc may have failed inside of the _text() call. If this ** is the case, clear the mallocFailed flag and return NULL. */ - if( db->mallocFailed ){ + assert( db->mallocFailed==0 || db->mallocFailed==1 ); + if( db->mallocFailed > prior_mallocFailed ){ sqlite3OomClear(db); ret = 0; } - sqlite3_mutex_leave(db->mutex); } +columnName_end: + sqlite3_mutex_leave(db->mutex); return ret; } @@ -87790,7 +91937,7 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ /* ** Unbind the value bound to variable i in virtual machine p. This is the ** the same as binding a NULL value to the column. If the "i" parameter is -** out of range, then SQLITE_RANGE is returned. Othewise SQLITE_OK. +** out of range, then SQLITE_RANGE is returned. Otherwise SQLITE_OK. ** ** A successful evaluation of this routine acquires the mutex on p. ** the mutex is released if any kind of error occurs. @@ -87798,25 +91945,24 @@ SQLITE_API const void *sqlite3_column_origin_name16(sqlite3_stmt *pStmt, int N){ ** The error code stored in database p->db is overwritten with the return ** value in any case. 
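**
** One detail of vdbeUnbind() just below is worth spelling out: the index
** parameter is now unsigned and the callers pass (u32)(i-1), so the old
** two-sided test "i<1 || i>p->nVar" on a 1-based index collapses into a
** single unsigned comparison, because zero and negative values wrap to
** huge unsigned numbers. A runnable sketch of the idiom (invented names):
**
**      #include <stdio.h>
**      static int inRange1(int i, int nVar){   /* 1-based index check */
**        return (unsigned int)(i-1) < (unsigned int)nVar;
**      }
**      int main(void){
**        printf("%d %d %d\n", inRange1(0,3), inRange1(3,3), inRange1(4,3));
**        return 0;                             /* prints: 0 1 0 */
**      }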
*/ -static int vdbeUnbind(Vdbe *p, int i){ +static int vdbeUnbind(Vdbe *p, unsigned int i){ Mem *pVar; if( vdbeSafetyNotNull(p) ){ return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(p->db->mutex); if( p->eVdbeState!=VDBE_READY_STATE ){ - sqlite3Error(p->db, SQLITE_MISUSE); + sqlite3Error(p->db, SQLITE_MISUSE_BKPT); sqlite3_mutex_leave(p->db->mutex); sqlite3_log(SQLITE_MISUSE, "bind on a busy prepared statement: [%s]", p->zSql); return SQLITE_MISUSE_BKPT; } - if( i<1 || i>p->nVar ){ + if( i>=(unsigned int)p->nVar ){ sqlite3Error(p->db, SQLITE_RANGE); sqlite3_mutex_leave(p->db->mutex); return SQLITE_RANGE; } - i--; pVar = &p->aVar[i]; sqlite3VdbeMemRelease(pVar); pVar->flags = MEM_Null; @@ -87853,7 +91999,7 @@ static int bindText( Mem *pVar; int rc; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ if( zData!=0 ){ pVar = &p->aVar[i-1]; @@ -87902,7 +92048,7 @@ SQLITE_API int sqlite3_bind_blob64( SQLITE_API int sqlite3_bind_double(sqlite3_stmt *pStmt, int i, double rValue){ int rc; Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetDouble(&p->aVar[i-1], rValue); sqlite3_mutex_leave(p->db->mutex); @@ -87915,7 +92061,7 @@ SQLITE_API int sqlite3_bind_int(sqlite3_stmt *p, int i, int iValue){ SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValue){ int rc; Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetInt64(&p->aVar[i-1], iValue); sqlite3_mutex_leave(p->db->mutex); @@ -87925,7 +92071,7 @@ SQLITE_API int sqlite3_bind_int64(sqlite3_stmt *pStmt, int i, sqlite_int64 iValu SQLITE_API int sqlite3_bind_null(sqlite3_stmt *pStmt, int i){ int rc; Vdbe *p = (Vdbe*)pStmt; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ sqlite3_mutex_leave(p->db->mutex); } @@ -87940,7 +92086,7 @@ SQLITE_API int sqlite3_bind_pointer( ){ int rc; Vdbe *p = (Vdbe*)pStmt; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ sqlite3VdbeMemSetPointer(&p->aVar[i-1], pPtr, zPTtype, xDestructor); sqlite3_mutex_leave(p->db->mutex); @@ -87967,7 +92113,10 @@ SQLITE_API int sqlite3_bind_text64( unsigned char enc ){ assert( xDel!=SQLITE_DYNAMIC ); - if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + if( enc!=SQLITE_UTF8 ){ + if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE; + nData &= ~(u16)1; + } return bindText(pStmt, i, zData, nData, xDel, enc); } #ifndef SQLITE_OMIT_UTF16 @@ -87975,10 +92124,10 @@ SQLITE_API int sqlite3_bind_text16( sqlite3_stmt *pStmt, int i, const void *zData, - int nData, + int n, void (*xDel)(void*) ){ - return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE); + return bindText(pStmt, i, zData, n & ~(u64)1, xDel, SQLITE_UTF16NATIVE); } #endif /* SQLITE_OMIT_UTF16 */ SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){ @@ -88018,7 +92167,7 @@ SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_valu SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ int rc; Vdbe *p = (Vdbe *)pStmt; - rc = vdbeUnbind(p, i); + rc = vdbeUnbind(p, (u32)(i-1)); if( rc==SQLITE_OK ){ #ifndef SQLITE_OMIT_INCRBLOB sqlite3VdbeMemSetZeroBlob(&p->aVar[i-1], n); @@ -88032,6 +92181,9 @@ SQLITE_API int sqlite3_bind_zeroblob(sqlite3_stmt *pStmt, int i, int n){ SQLITE_API int sqlite3_bind_zeroblob64(sqlite3_stmt *pStmt, int i, sqlite3_uint64 n){ int rc; Vdbe *p = (Vdbe *)pStmt; +#ifdef 
SQLITE_ENABLE_API_ARMOR + if( p==0 ) return SQLITE_MISUSE_BKPT; +#endif sqlite3_mutex_enter(p->db->mutex); if( n>(u64)p->db->aLimit[SQLITE_LIMIT_LENGTH] ){ rc = SQLITE_TOOBIG; @@ -88152,6 +92304,42 @@ SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt){ return pStmt ? ((Vdbe*)pStmt)->explain : 0; } +/* +** Set the explain mode for a statement. +*/ +SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode){ + Vdbe *v = (Vdbe*)pStmt; + int rc; +#ifdef SQLITE_ENABLE_API_ARMOR + if( pStmt==0 ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(v->db->mutex); + if( ((int)v->explain)==eMode ){ + rc = SQLITE_OK; + }else if( eMode<0 || eMode>2 ){ + rc = SQLITE_ERROR; + }else if( (v->prepFlags & SQLITE_PREPARE_SAVESQL)==0 ){ + rc = SQLITE_ERROR; + }else if( v->eVdbeState!=VDBE_READY_STATE ){ + rc = SQLITE_BUSY; + }else if( v->nMem>=10 && (eMode!=2 || v->haveEqpOps) ){ + /* No reprepare necessary */ + v->explain = eMode; + rc = SQLITE_OK; + }else{ + v->explain = eMode; + rc = sqlite3Reprepare(v); + v->haveEqpOps = eMode==2; + } + if( v->explain ){ + v->nResColumn = 12 - 4*v->explain; + }else{ + v->nResColumn = v->nResAlloc; + } + sqlite3_mutex_leave(v->db->mutex); + return rc; +} + /* ** Return true if the prepared statement is in need of being reset. */ @@ -88178,7 +92366,7 @@ SQLITE_API sqlite3_stmt *sqlite3_next_stmt(sqlite3 *pDb, sqlite3_stmt *pStmt){ if( pStmt==0 ){ pNext = (sqlite3_stmt*)pDb->pVdbe; }else{ - pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pNext; + pNext = (sqlite3_stmt*)((Vdbe*)pStmt)->pVNext; } sqlite3_mutex_leave(pDb->mutex); return pNext; @@ -88203,8 +92391,11 @@ SQLITE_API int sqlite3_stmt_status(sqlite3_stmt *pStmt, int op, int resetFlag){ sqlite3_mutex_enter(db->mutex); v = 0; db->pnBytesFreed = (int*)&v; + assert( db->lookaside.pEnd==db->lookaside.pTrueEnd ); + db->lookaside.pEnd = db->lookaside.pStart; sqlite3VdbeDelete(pVdbe); db->pnBytesFreed = 0; + db->lookaside.pEnd = db->lookaside.pTrueEnd; sqlite3_mutex_leave(db->mutex); }else{ v = pVdbe->aCounter[op]; @@ -88288,10 +92479,16 @@ static UnpackedRecord *vdbeUnpackRecord( ** a field of the row currently being updated or deleted. */ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){ - PreUpdate *p = db->pPreUpdate; + PreUpdate *p; Mem *pMem; int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( db==0 || ppValue==0 ){ + return SQLITE_MISUSE_BKPT; + } +#endif + p = db->pPreUpdate; /* Test that this call is being made from within an SQLITE_DELETE or ** SQLITE_UPDATE pre-update callback, and that iIdx is within range. */ if( !p || p->op==SQLITE_INSERT ){ @@ -88352,7 +92549,12 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa ** the number of columns in the row being updated, deleted or inserted. */ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ - PreUpdate *p = db->pPreUpdate; + PreUpdate *p; +#ifdef SQLITE_ENABLE_API_ARMOR + p = db!=0 ? db->pPreUpdate : 0; +#else + p = db->pPreUpdate; +#endif return (p ? p->keyinfo.nKeyField : 0); } #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ @@ -88370,7 +92572,12 @@ SQLITE_API int sqlite3_preupdate_count(sqlite3 *db){ ** or SET DEFAULT action is considered a trigger. */ SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){ - PreUpdate *p = db->pPreUpdate; + PreUpdate *p; +#ifdef SQLITE_ENABLE_API_ARMOR + p = db!=0 ? db->pPreUpdate : 0; +#else + p = db->pPreUpdate; +#endif return (p ? 
p->v->nFrame : 0);
 }
 #endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
@@ -88381,7 +92588,12 @@ SQLITE_API int sqlite3_preupdate_depth(sqlite3 *db){
 ** only.
 */
 SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
-  PreUpdate *p = db->pPreUpdate;
+  PreUpdate *p;
+#ifdef SQLITE_ENABLE_API_ARMOR
+  p = db!=0 ? db->pPreUpdate : 0;
+#else
+  p = db->pPreUpdate;
+#endif
   return (p ? p->iBlobWrite : -1);
 }
 #endif
@@ -88392,10 +92604,16 @@ SQLITE_API int sqlite3_preupdate_blobwrite(sqlite3 *db){
 ** a field of the row currently being updated or inserted.
 */
 SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
-  PreUpdate *p = db->pPreUpdate;
+  PreUpdate *p;
   int rc = SQLITE_OK;
   Mem *pMem;
 
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( db==0 || ppValue==0 ){
+    return SQLITE_MISUSE_BKPT;
+  }
+#endif
+  p = db->pPreUpdate;
   if( !p || p->op==SQLITE_DELETE ){
     rc = SQLITE_MISUSE_BKPT;
     goto preupdate_new_out;
@@ -88466,23 +92684,78 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa
 /*
 ** Return status data for a single loop within query pStmt.
 */
-SQLITE_API int sqlite3_stmt_scanstatus(
+SQLITE_API int sqlite3_stmt_scanstatus_v2(
   sqlite3_stmt *pStmt,            /* Prepared statement being queried */
-  int idx,                        /* Index of loop to report on */
+  int iScan,                      /* Index of loop to report on */
   int iScanStatusOp,              /* Which metric to return */
+  int flags,
   void *pOut                      /* OUT: Write the answer here */
 ){
   Vdbe *p = (Vdbe*)pStmt;
-  ScanStatus *pScan;
-  if( idx<0 || idx>=p->nScan ) return 1;
-  pScan = &p->aScan[idx];
+  VdbeOp *aOp;
+  int nOp;
+  ScanStatus *pScan = 0;
+  int idx;
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+  if( p==0 || pOut==0
+   || iScanStatusOp<SQLITE_SCANSTAT_NLOOP
+   || iScanStatusOp>SQLITE_SCANSTAT_NCYCLE ){
+    return 1;
+  }
+#endif
+  aOp = p->aOp;
+  nOp = p->nOp;
+  if( p->pFrame ){
+    VdbeFrame *pFrame;
+    for(pFrame=p->pFrame; pFrame->pParent; pFrame=pFrame->pParent);
+    aOp = pFrame->aOp;
+    nOp = pFrame->nOp;
+  }
+
+  if( iScan<0 ){
+    int ii;
+    if( iScanStatusOp==SQLITE_SCANSTAT_NCYCLE ){
+      i64 res = 0;
+      for(ii=0; ii<nOp; ii++){
+        res += aOp[ii].nCycle;
+      }
+      *(i64*)pOut = res;
+      return 0;
+    }
+    return 1;
+  }
+  if( flags & SQLITE_SCANSTAT_COMPLEX ){
+    idx = iScan;
+    pScan = &p->aScan[idx];
+  }else{
+    /* If the COMPLEX flag is clear, then this function must ignore any
+    ** ScanStatus structures with ScanStatus.addrLoop set to 0.
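+    ** With the COMPLEX flag clear, iScan therefore counts only the
+    ** entries that have a query-plan name (pScan->zName!=0), which keeps
+    ** the numbering compatible with the legacy sqlite3_stmt_scanstatus()
+    ** wrapper, which always invokes this function with flags==0.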
+    */
+    for(idx=0; idx<p->nScan; idx++){
+      pScan = &p->aScan[idx];
+      if( pScan->zName ){
+        iScan--;
+        if( iScan<0 ) break;
+      }
+    }
+  }
+  if( idx>=p->nScan ) return 1;
+
   switch( iScanStatusOp ){
     case SQLITE_SCANSTAT_NLOOP: {
-      *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
+      if( pScan->addrLoop>0 ){
+        *(sqlite3_int64*)pOut = aOp[pScan->addrLoop].nExec;
+      }else{
+        *(sqlite3_int64*)pOut = -1;
+      }
       break;
     }
     case SQLITE_SCANSTAT_NVISIT: {
-      *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
+      if( pScan->addrVisit>0 ){
+        *(sqlite3_int64*)pOut = aOp[pScan->addrVisit].nExec;
+      }else{
+        *(sqlite3_int64*)pOut = -1;
+      }
       break;
     }
     case SQLITE_SCANSTAT_EST: {
@@ -88501,7 +92774,7 @@ SQLITE_API int sqlite3_stmt_scanstatus(
     }
     case SQLITE_SCANSTAT_EXPLAIN: {
       if( pScan->addrExplain ){
-        *(const char**)pOut = p->aOp[ pScan->addrExplain ].p4.z;
+        *(const char**)pOut = aOp[ pScan->addrExplain ].p4.z;
       }else{
         *(const char**)pOut = 0;
       }
@@ -88509,12 +92782,51 @@ SQLITE_API int sqlite3_stmt_scanstatus(
     }
     case SQLITE_SCANSTAT_SELECTID: {
       if( pScan->addrExplain ){
-        *(int*)pOut = p->aOp[ pScan->addrExplain ].p1;
+        *(int*)pOut = aOp[ pScan->addrExplain ].p1;
+      }else{
+        *(int*)pOut = -1;
+      }
+      break;
+    }
+    case SQLITE_SCANSTAT_PARENTID: {
+      if( pScan->addrExplain ){
+        *(int*)pOut = aOp[ pScan->addrExplain ].p2;
       }else{
         *(int*)pOut = -1;
       }
       break;
     }
+    case SQLITE_SCANSTAT_NCYCLE: {
+      i64 res = 0;
+      if( pScan->aAddrRange[0]==0 ){
+        res = -1;
+      }else{
+        int ii;
+        for(ii=0; ii<ArraySize(pScan->aAddrRange); ii+=2){
+          int iIns = pScan->aAddrRange[ii];
+          int iEnd = pScan->aAddrRange[ii+1];
+          if( iIns==0 ) break;
+          if( iIns>0 ){
+            while( iIns<=iEnd ){
+              res += aOp[iIns].nCycle;
+              iIns++;
+            }
+          }else{
+            int iOp;
+            for(iOp=0; iOp<nOp; iOp++){
+              Op *pOp = &aOp[iOp];
+              if( pOp->p1!=iEnd ) continue;
+              if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_NCYCLE)==0 ){
+                continue;
+              }
+              res += aOp[iOp].nCycle;
+            }
+          }
+        }
+      }
+      *(i64*)pOut = res;
+      break;
+    }
     default: {
       return 1;
     }
@@ -88522,12 +92834,29 @@ SQLITE_API int sqlite3_stmt_scanstatus(
   return 0;
 }
 
+/*
+** Return status data for a single loop within query pStmt.
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+  sqlite3_stmt *pStmt,            /* Prepared statement being queried */
+  int iScan,                      /* Index of loop to report on */
+  int iScanStatusOp,              /* Which metric to return */
+  void *pOut                      /* OUT: Write the answer here */
+){
+  return sqlite3_stmt_scanstatus_v2(pStmt, iScan, iScanStatusOp, 0, pOut);
+}
+
 /*
 ** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
 */
 SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
   Vdbe *p = (Vdbe*)pStmt;
-  memset(p->anExec, 0, p->nOp * sizeof(i64));
+  int ii;
+  for(ii=0; p!=0 && ii<p->nOp; ii++){
+    Op *pOp = &p->aOp[ii];
+    pOp->nExec = 0;
+    pOp->nCycle = 0;
+  }
 }
 #endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
 
@@ -88862,8 +93191,12 @@ SQLITE_API int sqlite3_found_count = 0;
 ** sqlite3CantopenError(lineno)
 */
 static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
-  static int n = 0;
+  static u64 n = 0;
+  (void)pc;
+  (void)pOp;
+  (void)v;
   n++;
+  if( n==LARGEST_UINT64 ) abort(); /* So that n is used, preventing a warning */
 }
 #endif
 
@@ -89044,7 +93377,8 @@ static VdbeCursor *allocateCursor(
 ** return false.
*/ static int alsoAnInt(Mem *pRec, double rValue, i64 *piValue){ - i64 iValue = (double)rValue; + i64 iValue; + iValue = sqlite3RealToI64(rValue); if( sqlite3RealSameAsInt(rValue,iValue) ){ *piValue = iValue; return 1; @@ -89100,6 +93434,10 @@ static void applyNumericAffinity(Mem *pRec, int bTryForInt){ ** always preferred, even if the affinity is REAL, because ** an integer representation is more space efficient on disk. ** +** SQLITE_AFF_FLEXNUM: +** If the value is text, then try to convert it into a number of +** some kind (integer or real) but do not make any other changes. +** ** SQLITE_AFF_TEXT: ** Convert pRec to a text representation. ** @@ -89114,11 +93452,11 @@ static void applyAffinity( ){ if( affinity>=SQLITE_AFF_NUMERIC ){ assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL - || affinity==SQLITE_AFF_NUMERIC ); + || affinity==SQLITE_AFF_NUMERIC || affinity==SQLITE_AFF_FLEXNUM ); if( (pRec->flags & MEM_Int)==0 ){ /*OPTIMIZATION-IF-FALSE*/ - if( (pRec->flags & MEM_Real)==0 ){ + if( (pRec->flags & (MEM_Real|MEM_IntReal))==0 ){ if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1); - }else{ + }else if( affinity<=SQLITE_AFF_REAL ){ sqlite3VdbeIntegerAffinity(pRec); } } @@ -89206,17 +93544,18 @@ static u16 SQLITE_NOINLINE computeNumericType(Mem *pMem){ ** But it does set pMem->u.r and pMem->u.i appropriately. */ static u16 numericType(Mem *pMem){ - if( pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal) ){ + assert( (pMem->flags & MEM_Null)==0 + || pMem->db==0 || pMem->db->mallocFailed ); + if( pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null) ){ testcase( pMem->flags & MEM_Int ); testcase( pMem->flags & MEM_Real ); testcase( pMem->flags & MEM_IntReal ); - return pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal); - } - if( pMem->flags & (MEM_Str|MEM_Blob) ){ - testcase( pMem->flags & MEM_Str ); - testcase( pMem->flags & MEM_Blob ); - return computeNumericType(pMem); + return pMem->flags & (MEM_Int|MEM_Real|MEM_IntReal|MEM_Null); } + assert( pMem->flags & (MEM_Str|MEM_Blob) ); + testcase( pMem->flags & MEM_Str ); + testcase( pMem->flags & MEM_Blob ); + return computeNumericType(pMem); return 0; } @@ -89277,6 +93616,9 @@ SQLITE_PRIVATE void sqlite3VdbeMemPrettyPrint(Mem *pMem, StrAccum *pStr){ sqlite3_str_appendchar(pStr, 1, (c>=0x20&&c<=0x7f) ? c : '.'); } sqlite3_str_appendf(pStr, "]%s", encnames[pMem->enc]); + if( f & MEM_Term ){ + sqlite3_str_appendf(pStr, "(0-term)"); + } } } #endif @@ -89345,17 +93687,6 @@ SQLITE_PRIVATE void sqlite3VdbeRegisterDump(Vdbe *v){ # define REGISTER_TRACE(R,M) #endif - -#ifdef VDBE_PROFILE - -/* -** hwtime.h contains inline assembler code for implementing -** high-performance timing routines. -*/ -/* #include "hwtime.h" */ - -#endif - #ifndef NDEBUG /* ** This function is only called from within an assert() expression. It @@ -89415,13 +93746,102 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){ }else if( p->flags & MEM_Real ){ h += sqlite3VdbeIntValue(p); }else if( p->flags & (MEM_Str|MEM_Blob) ){ - h += p->n; - if( p->flags & MEM_Zero ) h += p->u.nZero; + /* All strings have the same hash and all blobs have the same hash, + ** though, at least, those hashes are different from each other and + ** from NULL. */ + h += 4093 + (p->flags & (MEM_Str|MEM_Blob)); } } return h; } + +/* +** For OP_Column, factor out the case where content is loaded from +** overflow pages, so that the code to implement this case is separate +** the common case where all content fits on the page. 
Factoring out
+** the code reduces register pressure and helps the common case
+** to run faster.
+*/
+static SQLITE_NOINLINE int vdbeColumnFromOverflow(
+  VdbeCursor *pC,       /* The BTree cursor from which we are reading */
+  int iCol,             /* The column to read */
+  int t,                /* The serial-type code for the column value */
+  i64 iOffset,          /* Offset to the start of the content value */
+  u32 cacheStatus,      /* Current Vdbe.cacheCtr value */
+  u32 colCacheCtr,      /* Current value of the column cache counter */
+  Mem *pDest            /* Store the value into this register. */
+){
+  int rc;
+  sqlite3 *db = pDest->db;
+  int encoding = pDest->enc;
+  int len = sqlite3VdbeSerialTypeLen(t);
+  assert( pC->eCurType==CURTYPE_BTREE );
+  if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) return SQLITE_TOOBIG;
+  if( len > 4000 && pC->pKeyInfo==0 ){
+    /* Cache large column values that are on overflow pages using
+    ** an RCStr (reference counted string) so that if they are reloaded,
+    ** they do not have to be copied a second time.  The overhead of
+    ** creating and managing the cache is such that this is only
+    ** profitable for larger TEXT and BLOB values.
+    **
+    ** Only do this on table-btrees so that writes to index-btrees do not
+    ** need to clear the cache.  This buys performance in the common case
+    ** in exchange for generality.
+    */
+    VdbeTxtBlbCache *pCache;
+    char *pBuf;
+    if( pC->colCache==0 ){
+      pC->pCache = sqlite3DbMallocZero(db, sizeof(VdbeTxtBlbCache) );
+      if( pC->pCache==0 ) return SQLITE_NOMEM;
+      pC->colCache = 1;
+    }
+    pCache = pC->pCache;
+    if( pCache->pCValue==0
+     || pCache->iCol!=iCol
+     || pCache->cacheStatus!=cacheStatus
+     || pCache->colCacheCtr!=colCacheCtr
+     || pCache->iOffset!=sqlite3BtreeOffset(pC->uc.pCursor)
+    ){
+      if( pCache->pCValue ) sqlite3RCStrUnref(pCache->pCValue);
+      pBuf = pCache->pCValue = sqlite3RCStrNew( len+3 );
+      if( pBuf==0 ) return SQLITE_NOMEM;
+      rc = sqlite3BtreePayload(pC->uc.pCursor, iOffset, len, pBuf);
+      if( rc ) return rc;
+      pBuf[len] = 0;
+      pBuf[len+1] = 0;
+      pBuf[len+2] = 0;
+      pCache->iCol = iCol;
+      pCache->cacheStatus = cacheStatus;
+      pCache->colCacheCtr = colCacheCtr;
+      pCache->iOffset = sqlite3BtreeOffset(pC->uc.pCursor);
+    }else{
+      pBuf = pCache->pCValue;
+    }
+    assert( t>=12 );
+    sqlite3RCStrRef(pBuf);
+    if( t&1 ){
+      rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, encoding,
+                                sqlite3RCStrUnref);
+      pDest->flags |= MEM_Term;
+    }else{
+      rc = sqlite3VdbeMemSetStr(pDest, pBuf, len, 0,
+                                sqlite3RCStrUnref);
+    }
+  }else{
+    rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, iOffset, len, pDest);
+    if( rc ) return rc;
+    sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
+    if( (t&1)!=0 && encoding==SQLITE_UTF8 ){
+      pDest->z[len] = 0;
+      pDest->flags |= MEM_Term;
+    }
+  }
+  pDest->flags &= ~MEM_Ephem;
+  return rc;
+}
+
+
 /*
 ** Return the symbolic name for the data type of a pMem
 */
@@ -89445,11 +93865,10 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
 ){
   Op *aOp = p->aOp;          /* Copy of p->aOp */
   Op *pOp = aOp;             /* Current operation */
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
-  Op *pOrigOp;               /* Value of pOp at the top of the loop */
-#endif
 #ifdef SQLITE_DEBUG
+  Op *pOrigOp;               /* Value of pOp at the top of the loop */
   int nExtraDelete = 0;      /* Verifies FORDELETE and AUXDELETE flags */
+  u8 iCompareIsInit = 0;     /* iCompare is initialized */
 #endif
   int rc = SQLITE_OK;        /* Value to return */
   sqlite3 *db = p->db;       /* The database */
@@ -89465,13 +93884,17 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
   Mem *pIn2 = 0;             /* 2nd input operand */
   Mem *pIn3 = 0;             /* 3rd input operand */
   Mem *pOut = 0;             /* Output operand */
-#ifdef
VDBE_PROFILE - u64 start; /* CPU clock count at start of opcode */ + u32 colCacheCtr = 0; /* Column cache counter */ +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE) + u64 *pnCycle = 0; + int bStmtScanStatus = IS_STMT_SCANSTATUS(db)!=0; #endif /*** INSERT STACK UNION HERE ***/ assert( p->eVdbeState==VDBE_RUN_STATE ); /* sqlite3_step() verifies this */ - sqlite3VdbeEnter(p); + if( DbMaskNonZero(p->lockMask) ){ + sqlite3VdbeEnter(p); + } #ifndef SQLITE_OMIT_PROGRESS_CALLBACK if( db->xProgress ){ u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP]; @@ -89492,7 +93915,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec( assert( p->bIsReader || p->readOnly!=0 ); p->iCurrentTime = 0; assert( p->explain==0 ); - p->pResultSet = 0; db->busyHandler.nBusy = 0; if( AtomicLoad(&db->u1.isInterrupted) ) goto abort_due_to_interrupt; sqlite3VdbeIOTraceSql(p); @@ -89529,12 +93951,18 @@ SQLITE_PRIVATE int sqlite3VdbeExec( assert( rc==SQLITE_OK ); assert( pOp>=aOp && pOp<&aOp[p->nOp]); -#ifdef VDBE_PROFILE - start = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); -#endif nVmStep++; -#ifdef SQLITE_ENABLE_STMT_SCANSTATUS - if( p->anExec ) p->anExec[(int)(pOp-aOp)]++; + +#if defined(VDBE_PROFILE) + pOp->nExec++; + pnCycle = &pOp->nCycle; + if( sqlite3NProfileCnt==0 ) *pnCycle -= sqlite3Hwtime(); +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( bStmtScanStatus ){ + pOp->nExec++; + pnCycle = &pOp->nCycle; + *pnCycle -= sqlite3Hwtime(); + } #endif /* Only allow tracing if SQLITE_DEBUG is defined. @@ -89596,7 +94024,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec( } } #endif -#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE) +#ifdef SQLITE_DEBUG pOrigOp = pOp; #endif @@ -89652,8 +94080,8 @@ SQLITE_PRIVATE int sqlite3VdbeExec( case OP_Goto: { /* jump */ #ifdef SQLITE_DEBUG - /* In debuggging mode, when the p5 flags is set on an OP_Goto, that - ** means we should really jump back to the preceeding OP_ReleaseReg + /* In debugging mode, when the p5 flags is set on an OP_Goto, that + ** means we should really jump back to the preceding OP_ReleaseReg ** instruction. */ if( pOp->p5 ){ assert( pOp->p2 < (int)(pOp - aOp) ); @@ -89861,7 +94289,7 @@ case OP_HaltIfNull: { /* in3 */ ** P5 is a value between 0 and 4, inclusive, that modifies the P4 string. ** ** 0: (no change) -** 1: NOT NULL contraint failed: P4 +** 1: NOT NULL constraint failed: P4 ** 2: UNIQUE constraint failed: P4 ** 3: CHECK constraint failed: P4 ** 4: FOREIGN KEY constraint failed: P4 @@ -89880,6 +94308,12 @@ case OP_Halt: { #ifdef SQLITE_DEBUG if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); } #endif + + /* A deliberately coded "OP_Halt SQLITE_INTERNAL * * * *" opcode indicates + ** something is wrong with the code generator. Raise an assertion in order + ** to bring this to the attention of fuzzers and other testing tools. */ + assert( pOp->p1!=SQLITE_INTERNAL ); + if( p->pFrame && pOp->p1==SQLITE_OK ){ /* Halt the sub-program. Return control to the parent frame. 
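    ** (p->pFrame is only non-zero while a trigger sub-program created by
    ** OP_Program is running, so this branch returns control from such a
    ** sub-program to its caller rather than ending the whole statement.)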
 */
     pFrame = p->pFrame;
@@ -90321,10 +94755,10 @@ case OP_ResultRow: {
   assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
 
   p->cacheCtr = (p->cacheCtr + 2)|1;
-  p->pResultSet = &aMem[pOp->p1];
+  p->pResultRow = &aMem[pOp->p1];
 #ifdef SQLITE_DEBUG
   {
-    Mem *pMem = p->pResultSet;
+    Mem *pMem = p->pResultRow;
     int i;
     for(i=0; i<pOp->p2; i++){
       assert( memIsValid(&pMem[i]) );
@@ -90461,7 +94895,6 @@ case OP_Subtract:              /* same as TK_MINUS, in1, in2, out3 */
 case OP_Multiply:              /* same as TK_STAR, in1, in2, out3 */
 case OP_Divide:                /* same as TK_SLASH, in1, in2, out3 */
 case OP_Remainder: {           /* same as TK_REM, in1, in2, out3 */
-  u16 flags;      /* Combined MEM_* flags from both inputs */
   u16 type1;      /* Numeric type of left operand */
   u16 type2;      /* Numeric type of right operand */
   i64 iA;         /* Integer value of left operand */
@@ -90470,12 +94903,12 @@ case OP_Remainder: {           /* same as TK_REM, in1, in2, out3 */
   double rB;      /* Real value of right operand */
 
   pIn1 = &aMem[pOp->p1];
-  type1 = numericType(pIn1);
+  type1 = pIn1->flags;
   pIn2 = &aMem[pOp->p2];
-  type2 = numericType(pIn2);
+  type2 = pIn2->flags;
   pOut = &aMem[pOp->p3];
-  flags = pIn1->flags | pIn2->flags;
   if( (type1 & type2 & MEM_Int)!=0 ){
+int_math:
     iA = pIn1->u.i;
     iB = pIn2->u.i;
     switch( pOp->opcode ){
@@ -90497,9 +94930,12 @@ case OP_Remainder: {           /* same as TK_REM, in1, in2, out3 */
     }
     pOut->u.i = iB;
     MemSetTypeFlag(pOut, MEM_Int);
-  }else if( (flags & MEM_Null)!=0 ){
+  }else if( ((type1 | type2) & MEM_Null)!=0 ){
    goto arithmetic_result_is_null;
   }else{
+    type1 = numericType(pIn1);
+    type2 = numericType(pIn2);
+    if( (type1 & type2 & MEM_Int)!=0 ) goto int_math;
 fp_math:
     rA = sqlite3VdbeRealValue(pIn1);
     rB = sqlite3VdbeRealValue(pIn2);
@@ -90657,7 +95093,7 @@ case OP_AddImm: {            /* in1 */
   pIn1 = &aMem[pOp->p1];
   memAboutToChange(p, pIn1);
   sqlite3VdbeMemIntegerify(pIn1);
-  pIn1->u.i += pOp->p2;
+  *(u64*)&pIn1->u.i += (u64)pOp->p2;
   break;
 }
 
@@ -90852,7 +95288,6 @@ case OP_Ge: {             /* same as TK_GE, jump, in1, in3 */
   flags1 = pIn1->flags;
   flags3 = pIn3->flags;
   if( (flags1 & flags3 & MEM_Int)!=0 ){
-    assert( (pOp->p5 & SQLITE_AFF_MASK)!=SQLITE_AFF_TEXT || CORRUPT_DB );
     /* Common case of comparison of two integers */
     if( pIn3->u.i > pIn1->u.i ){
       if( sqlite3aGTb[pOp->opcode] ){
@@ -90860,18 +95295,21 @@ case OP_Ge: {             /* same as TK_GE, jump, in1, in3 */
         goto jump_to_p2;
       }
       iCompare = +1;
+      VVA_ONLY( iCompareIsInit = 1; )
     }else if( pIn3->u.i < pIn1->u.i ){
       if( sqlite3aLTb[pOp->opcode] ){
         VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
         goto jump_to_p2;
       }
       iCompare = -1;
+      VVA_ONLY( iCompareIsInit = 1; )
     }else{
       if( sqlite3aEQb[pOp->opcode] ){
         VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
         goto jump_to_p2;
       }
      iCompare = 0;
+      VVA_ONLY( iCompareIsInit = 1; )
     }
     VdbeBranchTaken(0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
     break;
@@ -90903,6 +95341,7 @@ case OP_Ge: {             /* same as TK_GE, jump, in1, in3 */
       goto jump_to_p2;
     }
     iCompare = 1;  /* Operands are not equal */
+    VVA_ONLY( iCompareIsInit = 1; )
     break;
   }
 }else{
@@ -90913,24 +95352,28 @@ case OP_Ge: {             /* same as TK_GE, jump, in1, in3 */
     if( (flags1 | flags3)&MEM_Str ){
       if( (flags1 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
         applyNumericAffinity(pIn1,0);
-        testcase( flags3==pIn3->flags );
+        assert( flags3==pIn3->flags || CORRUPT_DB );
         flags3 = pIn3->flags;
       }
       if( (flags3 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
         applyNumericAffinity(pIn3,0);
       }
     }
-  }else if( affinity==SQLITE_AFF_TEXT ){
-    if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
+  }else if( affinity==SQLITE_AFF_TEXT && ((flags1 | flags3)
& MEM_Str)!=0 ){ + if( (flags1 & MEM_Str)!=0 ){ + pIn1->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); + }else if( (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn1->flags & MEM_Int ); testcase( pIn1->flags & MEM_Real ); testcase( pIn1->flags & MEM_IntReal ); sqlite3VdbeMemStringify(pIn1, encoding, 1); testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) ); flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask); - if( pIn1==pIn3 ) flags3 = flags1 | MEM_Str; + if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str; } - if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ + if( (flags3 & MEM_Str)!=0 ){ + pIn3->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal); + }else if( (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){ testcase( pIn3->flags & MEM_Int ); testcase( pIn3->flags & MEM_Real ); testcase( pIn3->flags & MEM_IntReal ); @@ -90959,6 +95402,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ res2 = sqlite3aGTb[pOp->opcode]; } iCompare = res; + VVA_ONLY( iCompareIsInit = 1; ) /* Undo any changes made by applyAffinity() to the input registers. */ assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) ); @@ -90980,10 +95424,10 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */ ** opcodes are allowed to occur between this instruction and the previous ** OP_Lt or OP_Gt. ** -** If result of an OP_Eq comparison on the same two operands as the -** prior OP_Lt or OP_Gt would have been true, then jump to P2. -** If the result of an OP_Eq comparison on the two previous -** operands would have been false or NULL, then fall through. +** If the result of an OP_Eq comparison on the same two operands as +** the prior OP_Lt or OP_Gt would have been true, then jump to P2. If +** the result of an OP_Eq comparison on the two previous operands +** would have been false or NULL, then fall through. */ case OP_ElseEq: { /* same as TK_ESCAPE, jump */ @@ -90997,6 +95441,7 @@ case OP_ElseEq: { /* same as TK_ESCAPE, jump */ break; } #endif /* SQLITE_DEBUG */ + assert( iCompareIsInit ); VdbeBranchTaken(iCompare==0, 2); if( iCompare==0 ) goto jump_to_p2; break; @@ -91091,6 +95536,7 @@ case OP_Compare: { pColl = pKeyInfo->aColl[i]; bRev = (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_DESC); iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl); + VVA_ONLY( iCompareIsInit = 1; ) if( iCompare ){ if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL) && ((aMem[p1+idx].flags & MEM_Null) || (aMem[p2+idx].flags & MEM_Null)) @@ -91108,13 +95554,14 @@ case OP_Compare: { /* Opcode: Jump P1 P2 P3 * * ** ** Jump to the instruction at address P1, P2, or P3 depending on whether -** in the most recent OP_Compare instruction the P1 vector was less than +** in the most recent OP_Compare instruction the P1 vector was less than, ** equal to, or greater than the P2 vector, respectively. ** ** This opcode must immediately follow an OP_Compare opcode. */ case OP_Jump: { /* jump */ assert( pOp>aOp && pOp[-1].opcode==OP_Compare ); + assert( iCompareIsInit ); if( iCompare<0 ){ VdbeBranchTaken(0,4); pOp = &aOp[pOp->p1 - 1]; }else if( iCompare==0 ){ @@ -91314,26 +95761,103 @@ case OP_IsNull: { /* same as TK_ISNULL, jump, in1 */ break; } -/* Opcode: IsNullOrType P1 P2 P3 * * -** Synopsis: if typeof(r[P1]) IN (P3,5) goto P2 +/* Opcode: IsType P1 P2 P3 P4 P5 +** Synopsis: if typeof(P1.P3) in P5 goto P2 +** +** Jump to P2 if the type of a column in a btree is one of the types specified +** by the P5 bitmask. 
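+**
+** For example (illustrative, using the bit assignments detailed below):
+** with P5==0x03 the jump is taken only when the value is an INTEGER
+** (0x01) or a FLOAT (0x02); a TEXT, BLOB, or NULL value falls through.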
+**
+** P1 is normally a cursor on a btree for which the row decode cache is
+** valid through at least column P3.  In other words, there should have been
+** a prior OP_Column for column P3 or greater.  If the cursor is not valid,
+** then this opcode might give spurious results.
+** If the btree row has fewer than P3 columns, then use P4 as the
+** datatype.
+**
+** If P1 is -1, then P3 is a register number and the datatype is taken
+** from the value in that register.
+**
+** P5 is a bitmask of data types.  SQLITE_INTEGER is the least significant
+** (0x01) bit.  SQLITE_FLOAT is the 0x02 bit.  SQLITE_TEXT is 0x04.
+** SQLITE_BLOB is 0x08.  SQLITE_NULL is 0x10.
+**
+** WARNING: This opcode does not reliably distinguish between NULL and REAL
+** when P1>=0.  If the database contains a NaN value, this opcode will think
+** that the datatype is REAL when it should be NULL.  When P1<0 and the value
+** is already stored in register P3, then this opcode does reliably
+** distinguish between NULL and REAL.  The problem only arises when P1>=0.
+**
+** Take the jump to address P2 if and only if the datatype of the
+** value determined by P1 and P3 corresponds to one of the bits in the
+** P5 bitmask.
+**
-** Jump to P2 if the value in register P1 is NULL or has a datatype P3.
-** P3 is an integer which should be one of SQLITE_INTEGER, SQLITE_FLOAT,
-** SQLITE_BLOB, SQLITE_NULL, or SQLITE_TEXT.
 */
-case OP_IsNullOrType: {      /* jump, in1 */
-  int doTheJump;
-  pIn1 = &aMem[pOp->p1];
-  doTheJump = (pIn1->flags & MEM_Null)!=0 || sqlite3_value_type(pIn1)==pOp->p3;
-  VdbeBranchTaken( doTheJump, 2);
-  if( doTheJump ) goto jump_to_p2;
+case OP_IsType: {        /* jump */
+  VdbeCursor *pC;
+  u16 typeMask;
+  u32 serialType;
+
+  assert( pOp->p1>=(-1) && pOp->p1<p->nCursor );
+  assert( pOp->p1>=0 || (pOp->p3>=0 && pOp->p3<=(p->nMem+1 - p->nCursor)) );
+  if( pOp->p1>=0 ){
+    pC = p->apCsr[pOp->p1];
+    assert( pC!=0 );
+    assert( pOp->p3>=0 );
+    if( pOp->p3<pC->nHdrParsed ){
+      serialType = pC->aType[pOp->p3];
+      if( serialType>=12 ){
+        if( serialType&1 ){
+          typeMask = 0x04;   /* SQLITE_TEXT */
+        }else{
+          typeMask = 0x08;   /* SQLITE_BLOB */
+        }
+      }else{
+        static const unsigned char aMask[] = {
+           0x10, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x2,
+           0x01, 0x01, 0x10, 0x10
+        };
+        testcase( serialType==0 );
+        testcase( serialType==1 );
+        testcase( serialType==2 );
+        testcase( serialType==3 );
+        testcase( serialType==4 );
+        testcase( serialType==5 );
+        testcase( serialType==6 );
+        testcase( serialType==7 );
+        testcase( serialType==8 );
+        testcase( serialType==9 );
+        testcase( serialType==10 );
+        testcase( serialType==11 );
+        typeMask = aMask[serialType];
+      }
+    }else{
+      typeMask = 1 << (pOp->p4.i - 1);
+      testcase( typeMask==0x01 );
+      testcase( typeMask==0x02 );
+      testcase( typeMask==0x04 );
+      testcase( typeMask==0x08 );
+      testcase( typeMask==0x10 );
+    }
+  }else{
+    assert( memIsValid(&aMem[pOp->p3]) );
+    typeMask = 1 << (sqlite3_value_type((sqlite3_value*)&aMem[pOp->p3])-1);
+    testcase( typeMask==0x01 );
+    testcase( typeMask==0x02 );
+    testcase( typeMask==0x04 );
+    testcase( typeMask==0x08 );
+    testcase( typeMask==0x10 );
+  }
+  VdbeBranchTaken( (typeMask & pOp->p5)!=0, 2);
+  if( typeMask & pOp->p5 ){
+    goto jump_to_p2;
+  }
   break;
 }
 
 /* Opcode: ZeroOrNull P1 P2 P3 * *
 ** Synopsis: r[P2] = 0 OR NULL
 **
-** If all both registers P1 and P3 are NOT NULL, then store a zero in
+** If both registers P1 and P3 are NOT NULL, then store a zero in
 ** register P2.  If either registers P1 or P3 are NULL then put
 ** a NULL in register P2.
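** For example (illustrative): if r[P1]=5 and r[P3]=7 then r[P2] is set
** to 0, but if r[P1]=5 and r[P3] is NULL then r[P2] is set to NULL.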
 */
@@ -91369,11 +95893,14 @@ case OP_NotNull: {            /* same as TK_NOTNULL, jump, in1 */
 ** If it is, then set register P3 to NULL and jump immediately to P2.
 ** If P1 is not on a NULL row, then fall through without making any
 ** changes.
+**
+** If P1 is not an open cursor, then this opcode is a no-op.
 */
 case OP_IfNullRow: {         /* jump */
+  VdbeCursor *pC;
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
-  assert( p->apCsr[pOp->p1]!=0 );
-  if( p->apCsr[pOp->p1]->nullRow ){
+  pC = p->apCsr[pOp->p1];
+  if( pC && pC->nullRow ){
     sqlite3VdbeMemSetNull(aMem + pOp->p3);
     goto jump_to_p2;
   }
@@ -91424,7 +95951,7 @@ case OP_Offset: {          /* out3 */
 ** Interpret the data that cursor P1 points to as a structure built using
 ** the MakeRecord instruction.  (See the MakeRecord opcode for additional
 ** information about the format of the data.)  Extract the P2-th column
-** from this record.  If there are less that (P2+1)
+** from this record.  If there are less than (P2+1)
 ** values in the record, extract a NULL.
 **
 ** The value extracted is stored in register P3.
@@ -91433,12 +95960,14 @@ case OP_Offset: {          /* out3 */
 ** if the P4 argument is a P4_MEM use the value of the P4 argument as
 ** the result.
 **
-** If the OPFLAG_LENGTHARG and OPFLAG_TYPEOFARG bits are set on P5 then
-** the result is guaranteed to only be used as the argument of a length()
-** or typeof() function, respectively.  The loading of large blobs can be
-** skipped for length() and all content loading can be skipped for typeof().
+** If the OPFLAG_LENGTHARG bit is set in P5 then the result is guaranteed
+** to only be used by the length() function or the equivalent.  The content
+** of large blobs is not loaded, thus saving CPU cycles.  If the
+** OPFLAG_TYPEOFARG bit is set then the result will only be used by the
+** typeof() function or the IS NULL or IS NOT NULL operators or the
+** equivalent.  In this case, all content loading can be omitted.
 */
-case OP_Column: {
+case OP_Column: {            /* ncycle */
   u32 p2;            /* column number to retrieve */
   VdbeCursor *pC;    /* The VDBE cursor */
   BtCursor *pCrsr;   /* The B-Tree cursor corresponding to pC */
@@ -91682,11 +96211,16 @@ case OP_Column: {
       pDest->flags = aFlag[t&1];
     }
   }else{
+    u8 p5;
     pDest->enc = encoding;
+    assert( pDest->db==db );
     /* This branch happens only when content is on overflow pages */
-    if( ((pOp->p5 & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG))!=0
-     && ((t>=12 && (t&1)==0) || (pOp->p5 & OPFLAG_TYPEOFARG)!=0))
-     || (len = sqlite3VdbeSerialTypeLen(t))==0
+    if( ((p5 = (pOp->p5 & OPFLAG_BYTELENARG))!=0
+         && (p5==OPFLAG_TYPEOFARG
+             || (t>=12 && ((t&1)==0 || p5==OPFLAG_BYTELENARG))
+            )
+        )
+     || sqlite3VdbeSerialTypeLen(t)==0
     ){
       /* Content is irrelevant for
       ** 1.
the typeof() function, @@ -91703,11 +96237,13 @@ case OP_Column: { */ sqlite3VdbeSerialGet((u8*)sqlite3CtypeMap, t, pDest); }else{ - if( len>db->aLimit[SQLITE_LIMIT_LENGTH] ) goto too_big; - rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest); - if( rc!=SQLITE_OK ) goto abort_due_to_error; - sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest); - pDest->flags &= ~MEM_Ephem; + rc = vdbeColumnFromOverflow(pC, p2, t, aOffset[p2], + p->cacheCtr, colCacheCtr, pDest); + if( rc ){ + if( rc==SQLITE_NOMEM ) goto no_mem; + if( rc==SQLITE_TOOBIG ) goto too_big; + goto abort_due_to_error; + } } } @@ -91787,7 +96323,7 @@ case OP_TypeCheck: { } case COLTYPE_REAL: { testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_Real ); - testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_IntReal ); + assert( (pIn1->flags & MEM_IntReal)==0 ); if( pIn1->flags & MEM_Int ){ /* When applying REAL affinity, if the result is still an MEM_Int ** that will fit in 6 bytes, then change the type to MEM_IntReal @@ -91866,7 +96402,7 @@ case OP_Affinity: { }else{ pIn1->u.r = (double)pIn1->u.i; pIn1->flags |= MEM_Real; - pIn1->flags &= ~MEM_Int; + pIn1->flags &= ~(MEM_Int|MEM_Str); } } REGISTER_TRACE((int)(pIn1-aMem), pIn1); @@ -92169,7 +96705,6 @@ case OP_MakeRecord: { /* NULL value. No change in zPayload */ }else{ u64 v; - u32 i; if( serial_type==7 ){ assert( sizeof(v)==sizeof(pRec->u.r) ); memcpy(&v, &pRec->u.r, sizeof(v)); @@ -92177,12 +96712,17 @@ case OP_MakeRecord: { }else{ v = pRec->u.i; } - len = i = sqlite3SmallTypeSizes[serial_type]; - assert( i>0 ); - while( 1 /*exit-by-break*/ ){ - zPayload[--i] = (u8)(v&0xFF); - if( i==0 ) break; - v >>= 8; + len = sqlite3SmallTypeSizes[serial_type]; + assert( len>=1 && len<=8 && len!=5 && len!=7 ); + switch( len ){ + default: zPayload[7] = (u8)(v&0xff); v >>= 8; + zPayload[6] = (u8)(v&0xff); v >>= 8; + case 6: zPayload[5] = (u8)(v&0xff); v >>= 8; + zPayload[4] = (u8)(v&0xff); v >>= 8; + case 4: zPayload[3] = (u8)(v&0xff); v >>= 8; + case 3: zPayload[2] = (u8)(v&0xff); v >>= 8; + case 2: zPayload[1] = (u8)(v&0xff); v >>= 8; + case 1: zPayload[0] = (u8)(v&0xff); } zPayload += len; } @@ -92790,7 +97330,7 @@ case OP_SetCookie: { ** ** See also: OP_OpenRead, OP_ReopenIdx */ -case OP_ReopenIdx: { +case OP_ReopenIdx: { /* ncycle */ int nField; KeyInfo *pKeyInfo; u32 p2; @@ -92811,7 +97351,7 @@ case OP_ReopenIdx: { } /* If the cursor is not currently open or is open on a different ** index, then fall through into OP_OpenRead to force a reopen */ -case OP_OpenRead: +case OP_OpenRead: /* ncycle */ case OP_OpenWrite: assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ ); @@ -92905,7 +97445,7 @@ case OP_OpenWrite: ** ** Duplicate ephemeral cursors are used for self-joins of materialized views. */ -case OP_OpenDup: { +case OP_OpenDup: { /* ncycle */ VdbeCursor *pOrig; /* The original cursor to be duplicated */ VdbeCursor *pCx; /* The new cursor */ @@ -92967,8 +97507,8 @@ case OP_OpenDup: { ** by this opcode will be used for automatically created transient ** indices in joins. 
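**
** For example (illustrative): in "SELECT * FROM t1, t2 WHERE t1.a=t2.b"
** with no index on t2.b, the planner may build a transient index on
** t2.b with this opcode instead of rescanning t2 for every row of t1.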
 */
-case OP_OpenAutoindex:
-case OP_OpenEphemeral: {
+case OP_OpenAutoindex:       /* ncycle */
+case OP_OpenEphemeral: {     /* ncycle */
   VdbeCursor *pCx;
   KeyInfo *pKeyInfo;
 
@@ -92991,7 +97531,7 @@ case OP_OpenEphemeral: {
   }
   pCx = p->apCsr[pOp->p1];
   if( pCx && !pCx->noReuse &&  ALWAYS(pOp->p2<=pCx->nField) ){
-    /* If the ephermeral table is already open and has no duplicates from
+    /* If the ephemeral table is already open and has no duplicates from
    ** OP_OpenDup, then erase all existing content so that the table is
    ** empty again, rather than creating a new table. */
     assert( pCx->isEphemeral );
@@ -93126,7 +97666,7 @@ case OP_OpenPseudo: {
 ** Close a cursor previously opened as P1.  If P1 is not
 ** currently open, this instruction is a no-op.
 */
-case OP_Close: {
+case OP_Close: {             /* ncycle */
  assert( pOp->p1>=0 && pOp->p1<p->nCursor );
  sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]);
  p->apCsr[pOp->p1] = 0;
@@ -93243,10 +97783,10 @@ case OP_ColumnsUsed: {
 **
 ** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
 */
-case OP_SeekLT:         /* jump, in3, group */
-case OP_SeekLE:         /* jump, in3, group */
-case OP_SeekGE:         /* jump, in3, group */
-case OP_SeekGT: {       /* jump, in3, group */
+case OP_SeekLT:         /* jump, in3, group, ncycle */
+case OP_SeekLE:         /* jump, in3, group, ncycle */
+case OP_SeekGE:         /* jump, in3, group, ncycle */
+case OP_SeekGT: {       /* jump, in3, group, ncycle */
   int res;           /* Comparison result */
   int oc;            /* Opcode */
   VdbeCursor *pC;    /* The cursor to seek */
@@ -93375,7 +97915,13 @@ case OP_SeekGT: {       /* jump, in3, group */
     r.aMem = &aMem[pOp->p3];
 #ifdef SQLITE_DEBUG
-    { int i; for(i=0; i<r.nField; i++) assert( memIsValid(&r.aMem[i]) ); }
+    {
+      int i;
+      for(i=0; i<r.nField; i++){
+        assert( memIsValid(&r.aMem[i]) );
+        if( i>0 ) REGISTER_TRACE(pOp->p3+i, &r.aMem[i]);
+      }
+    }
 #endif
     r.eqSeen = 0;
     rc = sqlite3BtreeIndexMoveto(pC->uc.pCursor, &r, &res);
@@ -93438,7 +97984,7 @@ case OP_SeekGT: {       /* jump, in3, group */
 }
 
-/* Opcode: SeekScan  P1 P2 * * *
+/* Opcode: SeekScan  P1 P2 * * P5
 ** Synopsis: Scan-ahead up to P1 rows
 **
 ** This opcode is a prefix opcode to OP_SeekGE.  In other words, this
 ** opcode must be immediately followed by OP_SeekGE. This constraint is
 ** checked by assert() statements.
 **
 ** This opcode uses the P1 through P4 operands of the subsequent
 ** OP_SeekGE.  In the text that follows, the operands of the subsequent
 ** OP_SeekGE opcode are denoted as SeekOP.P1 through SeekOP.P4.   Only
-** the P1 and P2 operands of this opcode are also used, and  are called
-** This.P1 and This.P2.
+** the P1, P2 and P5 operands of this opcode are also used, and are called
+** This.P1, This.P2 and This.P5.
 **
 ** This opcode helps to optimize IN operators on a multi-column index
 ** where the IN operator is on the later terms of the index by avoiding
 ** unnecessary seeks on the btree, substituting steps to the next row
 ** in the b-tree instead.
 **
 ** The SeekGE.P3 and SeekGE.P4 operands identify an unpacked key which
 ** is the desired entry that we want the cursor SeekGE.P1 to be pointing
-** to.  Call this SeekGE.P4/P5 row the "target".
+** to.  Call this SeekGE.P3/P4 row the "target".
 **
 ** If the SeekGE.P1 cursor is not currently pointing to a valid row,
 ** then this opcode is a no-op and control passes through into the OP_SeekGE.
 **
 ** If the SeekGE.P1 cursor is pointing to a valid row, then that row
 ** might be the target row, or it might be near and slightly before the
-** target row.  This opcode attempts to position the cursor on the target
-** row by, perhaps by invoking sqlite3BtreeStep() on the cursor
-** between 0 and This.P1 times.
-**
-** There are three possible outcomes from this opcode:
-**
-** 1. If after This.P1 steps, the cursor is still pointing to a place that
-**    is earlier in the btree than the target row, then fall through
-**    into the subsequent OP_SeekGE opcode.
-**
-** 2. If the cursor is successfully moved to the target row by 0 or more
-**    sqlite3BtreeNext() calls, then jump to This.P2, which will land just
-**    past the OP_IdxGT or OP_IdxGE opcode that follows the OP_SeekGE.
-**
-** 3. If the cursor ends up past the target row (indicating the target
-**    row does not exist in the btree) then jump to SeekOP.P2.
+** target row, or it might be after the target row.  If the cursor is
+** currently before the target row, then this opcode attempts to position
+** the cursor on or after the target row by invoking sqlite3BtreeStep()
+** on the cursor between 1 and This.P1 times.
+**
+** The This.P5 parameter is a flag that indicates what to do if the
+** cursor ends up pointing at a valid row that is past the target
+** row.  If This.P5 is false (0) then a jump is made to SeekGE.P2.  If
+** This.P5 is true (non-zero) then a jump is made to This.P2.  The P5==0
+** case occurs when there are no inequality constraints to the right of
+** the IN constraint.  The jump to SeekGE.P2 ends the loop.  The P5!=0 case
+** occurs when there are inequality constraints to the right of the IN
+** operator.  In that case, the This.P2 will point either directly to or
+** to setup code prior to the OP_IdxGT or OP_IdxGE opcode that checks for
+** loop termination.
+**
+** Possible outcomes from this opcode:
+**
+** 1. If the cursor is initially not pointed to any valid row, then
+**    fall through into the subsequent OP_SeekGE opcode.
+**
+** 2. If the cursor is left pointing to a row that is before the target
+**    row, even after making as many as This.P1 calls to
+**    sqlite3BtreeNext(), then also fall through into OP_SeekGE.
+**
+** 3. If the cursor is left pointing at the target row, either because it
+**    was at the target row to begin with or because one or more
+**    sqlite3BtreeNext() calls moved the cursor to the target row,
+**    then jump to This.P2.
+**
+** 4. If the cursor started out before the target row and a call to
+**    sqlite3BtreeNext() moved the cursor off the end of the index
+**    (indicating that the target row definitely does not exist in the
+**    btree) then jump to SeekGE.P2, ending the loop.
+**
+** 5. If the cursor ends up on a valid row that is past the target row
+**    (indicating that the target row does not exist in the btree) then
+**    jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0.
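+**
+** As an illustrative example (not a literal code-generator listing):
+** for an index on (a,b) and a WHERE clause such as "a IN (...) AND b>?",
+** OP_SeekScan is emitted in front of the OP_SeekGE that advances to the
+** next IN element, so that a few sqlite3BtreeNext() calls can take the
+** place of a full btree seek when the next target row is nearby.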
 */
-case OP_SeekScan: {
+case OP_SeekScan: {          /* ncycle */
   VdbeCursor *pC;
   int res;
   int nStep;
@@ -93492,14 +98060,25 @@ case OP_SeekScan: {
   assert( pOp[1].opcode==OP_SeekGE );
 
-  /* pOp->p2 points to the first instruction past the OP_IdxGT that
-  ** follows the OP_SeekGE.  */
+  /* If pOp->p5 is clear, then pOp->p2 points to the first instruction past the
+  ** OP_IdxGT that follows the OP_SeekGE.  Otherwise, it points to the first
+  ** opcode past the OP_SeekGE itself.  */
   assert( pOp->p2>=(int)(pOp-aOp)+2 );
-  assert( aOp[pOp->p2-1].opcode==OP_IdxGT || aOp[pOp->p2-1].opcode==OP_IdxGE );
-  testcase( aOp[pOp->p2-1].opcode==OP_IdxGE );
-  assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
-  assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
-  assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
+#ifdef SQLITE_DEBUG
+  if( pOp->p5==0 ){
+    /* There are no inequality constraints following the IN constraint. */
+    assert( pOp[1].p1==aOp[pOp->p2-1].p1 );
+    assert( pOp[1].p2==aOp[pOp->p2-1].p2 );
+    assert( pOp[1].p3==aOp[pOp->p2-1].p3 );
+    assert( aOp[pOp->p2-1].opcode==OP_IdxGT
+         || aOp[pOp->p2-1].opcode==OP_IdxGE );
+    testcase( aOp[pOp->p2-1].opcode==OP_IdxGE );
+  }else{
+    /* There are inequality constraints.  */
+    assert( pOp->p2==(int)(pOp-aOp)+2 );
+    assert( aOp[pOp->p2-1].opcode==OP_SeekGE );
+  }
+#endif
+
   assert( pOp->p1>0 );
   pC = p->apCsr[pOp[1].p1];
@@ -93533,8 +98112,9 @@ case OP_SeekScan: {
   while(1){
     rc = sqlite3VdbeIdxKeyCompare(db, pC, &r, &res);
     if( rc ) goto abort_due_to_error;
-    if( res>0 ){
+    if( res>0 && pOp->p5==0 ){
 seekscan_search_fail:
+      /* Jump to SeekGE.P2, ending the loop */
 #ifdef SQLITE_DEBUG
       if( db->flags&SQLITE_VdbeTrace ){
         printf("... %d steps and then skip\n", pOp->p1 - nStep);
@@ -93544,7 +98124,8 @@ case OP_SeekScan: {
       pOp++;
       goto jump_to_p2;
     }
-    if( res==0 ){
+    if( res>=0 ){
+      /* Jump to This.P2, bypassing the OP_SeekGE opcode */
 #ifdef SQLITE_DEBUG
       if( db->flags&SQLITE_VdbeTrace ){
         printf("... %d steps and then success\n", pOp->p1 - nStep);
@@ -93564,6 +98145,7 @@ case OP_SeekScan: {
       break;
     }
     nStep--;
+    pC->cacheStatus = CACHE_STALE;
     rc = sqlite3BtreeNext(pC->uc.pCursor, 0);
     if( rc ){
       if( rc==SQLITE_DONE ){
@@ -93593,7 +98175,7 @@ case OP_SeekScan: {
 **
 ** P1 must be a valid b-tree cursor.
 */
-case OP_SeekHit: {
+case OP_SeekHit: {           /* ncycle */
   VdbeCursor *pC;
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
   pC = p->apCsr[pOp->p1];
@@ -93620,12 +98202,16 @@ case OP_SeekHit: {
 /* Opcode: IfNotOpen P1 P2 * * *
 ** Synopsis: if( !csr[P1] ) goto P2
 **
-** If cursor P1 is not open, jump to instruction P2. Otherwise, fall through.
+** If cursor P1 is not open or if P1 is set to a NULL row using the
+** OP_NullRow opcode, then jump to instruction P2. Otherwise, fall through.
 */
 case OP_IfNotOpen: {        /* jump */
+  VdbeCursor *pCur;
+
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
-  VdbeBranchTaken(p->apCsr[pOp->p1]==0, 2);
-  if( !p->apCsr[pOp->p1] ){
+  pCur = p->apCsr[pOp->p1];
+  VdbeBranchTaken(pCur==0 || pCur->nullRow, 2);
+  if( pCur==0 || pCur->nullRow ){
    goto jump_to_p2_and_check_for_interrupt;
  }
  break;
@@ -93676,13 +98262,13 @@ case OP_IfNotOpen: {        /* jump */
 ** operands to OP_NotFound and OP_IdxGT.
 **
 ** This opcode is an optimization attempt only.  If this opcode always
-** falls through, the correct answer is still obtained, but extra works
+** falls through, the correct answer is still obtained, but extra work
 ** is performed.
 **
 ** A value of N in the seekHit flag of cursor P1 means that there exists
 ** a key P3:N that will match some record in the index.  We want to know
** if it is possible for a record P3:P4 to match some record in the
-** index.  If it is not possible, we can skips some work.  So if seekHit
+** index.  If it is not possible, we can skip some work.  So if seekHit
 ** is less than P4, attempt to find out if a match is possible by running
 ** OP_NotFound.
 **
@@ -93721,7 +98307,7 @@ case OP_IfNotOpen: {        /* jump */
 **
 ** See also: NotFound, Found, NotExists
 */
-case OP_IfNoHope: {     /* jump, in3 */
+case OP_IfNoHope: {     /* jump, in3, ncycle */
   VdbeCursor *pC;
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
   pC = p->apCsr[pOp->p1];
@@ -93735,9 +98321,9 @@ case OP_IfNoHope: {     /* jump, in3 */
   /* Fall through into OP_NotFound */
   /* no break */ deliberate_fall_through
 }
-case OP_NoConflict:     /* jump, in3 */
-case OP_NotFound:       /* jump, in3 */
-case OP_Found: {        /* jump, in3 */
+case OP_NoConflict:     /* jump, in3, ncycle */
+case OP_NotFound:       /* jump, in3, ncycle */
+case OP_Found: {        /* jump, in3, ncycle */
   int alreadyExists;
   int ii;
   VdbeCursor *pC;
@@ -93867,7 +98453,7 @@ case OP_Found: {        /* jump, in3 */
 **
 ** See also: Found, NotFound, NoConflict, SeekRowid
 */
-case OP_SeekRowid: {        /* jump, in3 */
+case OP_SeekRowid: {        /* jump, in3, ncycle */
   VdbeCursor *pC;
   BtCursor *pCrsr;
   int res;
@@ -93892,7 +98478,7 @@ case OP_SeekRowid: {        /* jump, in3 */
   }
   /* Fall through into OP_NotExists */
   /* no break */ deliberate_fall_through
-case OP_NotExists:          /* jump, in3 */
+case OP_NotExists:          /* jump, in3, ncycle */
   pIn3 = &aMem[pOp->p3];
   assert( (pIn3->flags & MEM_Int)!=0 || pOp->opcode==OP_SeekRowid );
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
@@ -94172,8 +98758,11 @@ case OP_Insert: {
   if( pOp->p5 & OPFLAG_ISNOOP ) break;
 #endif
 
-  if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
-  if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+  assert( (pOp->p5 & OPFLAG_LASTROWID)==0 || (pOp->p5 & OPFLAG_NCHANGE)!=0 );
+  if( pOp->p5 & OPFLAG_NCHANGE ){
+    p->nChange++;
+    if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+  }
   assert( (pData->flags & (MEM_Blob|MEM_Str))!=0 || pData->n==0 );
   x.pData = pData->z;
   x.nData = pData->n;
@@ -94184,12 +98773,14 @@ case OP_Insert: {
     x.nZero = 0;
   }
   x.pKey = 0;
+  assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT );
   rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
       (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)),
      seekResult
   );
   pC->deferredMoveto = 0;
   pC->cacheStatus = CACHE_STALE;
+  colCacheCtr++;
 
   /* Invoke the update-hook if required. */
   if( rc ) goto abort_due_to_error;
@@ -94243,13 +98834,18 @@ case OP_RowCell: {
 ** left in an undefined state.
 **
 ** If the OPFLAG_AUXDELETE bit is set on P5, that indicates that this
-** delete one of several associated with deleting a table row and all its
-** associated index entries.  Exactly one of those deletes is the "primary"
-** delete.  The others are all on OPFLAG_FORDELETE cursors or else are
-** marked with the AUXDELETE flag.
+** delete is one of several associated with deleting a table row and
+** all its associated index entries.  Exactly one of those deletes is
+** the "primary" delete.  The others are all on OPFLAG_FORDELETE
+** cursors or else are marked with the AUXDELETE flag.
 **
-** If the OPFLAG_NCHANGE flag of P2 (NB: P2 not P5) is set, then the row
-** change count is incremented (otherwise not).
+** If the OPFLAG_NCHANGE (0x01) flag of P2 (NB: P2 not P5) is set, then
+** the row change count is incremented (otherwise not).
+**
+** If the OPFLAG_ISNOOP (0x40) flag of P2 (not P5!) is set, then the
+** pre-update-hook for deletes is run, but the btree is otherwise unchanged.
+** This happens when the OP_Delete is to be shortly followed by an OP_Insert +** with the same key, causing the btree entry to be overwritten. ** ** P1 must not be pseudo-table. It has to be a real table with ** multiple rows. @@ -94350,6 +98946,7 @@ case OP_Delete: { rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5); pC->cacheStatus = CACHE_STALE; + colCacheCtr++; pC->seekResult = 0; if( rc ) goto abort_due_to_error; @@ -94417,13 +99014,13 @@ case OP_SorterCompare: { ** Write into register P2 the current sorter data for sorter cursor P1. ** Then clear the column header cache on cursor P3. ** -** This opcode is normally use to move a record out of the sorter and into +** This opcode is normally used to move a record out of the sorter and into ** a register that is the source for a pseudo-table cursor created using ** OpenPseudo. That pseudo-table cursor is the one that is identified by ** parameter P3. Clearing the P3 column cache as part of this opcode saves ** us from having to issue a separate NullRow instruction to clear that cache. */ -case OP_SorterData: { +case OP_SorterData: { /* ncycle */ VdbeCursor *pC; pOut = &aMem[pOp->p2]; @@ -94515,7 +99112,7 @@ case OP_RowData: { ** be a separate OP_VRowid opcode for use with virtual tables, but this ** one opcode now works for both table types. */ -case OP_Rowid: { /* out2 */ +case OP_Rowid: { /* out2, ncycle */ VdbeCursor *pC; i64 v; sqlite3_vtab *pVtab; @@ -94614,8 +99211,8 @@ case OP_NullRow: { ** from the end toward the beginning. In other words, the cursor is ** configured to use Prev, not Next. */ -case OP_SeekEnd: -case OP_Last: { /* jump */ +case OP_SeekEnd: /* ncycle */ +case OP_Last: { /* jump, ncycle */ VdbeCursor *pC; BtCursor *pCrsr; int res; @@ -94698,8 +99295,8 @@ case OP_IfSmaller: { /* jump */ ** regression tests can determine whether or not the optimizer is ** correctly optimizing out sorts. */ -case OP_SorterSort: /* jump */ -case OP_Sort: { /* jump */ +case OP_SorterSort: /* jump ncycle */ +case OP_Sort: { /* jump ncycle */ #ifdef SQLITE_TEST sqlite3_sort_count++; sqlite3_search_count--; @@ -94716,17 +99313,22 @@ case OP_Sort: { /* jump */ ** If the table or index is not empty, fall through to the following ** instruction. ** +** If P2 is zero, that is an assertion that the P1 table is never +** empty and hence the jump will never be taken. +** ** This opcode leaves the cursor configured to move in forward order, ** from the beginning toward the end. In other words, the cursor is ** configured to use Next, not Prev. 
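**
** For example (illustrative): the scan loop for "SELECT * FROM t1"
** starts with OP_Rewind on the t1 cursor; if t1 is empty the jump to P2
** skips the loop body entirely, otherwise OP_Next drives the iteration.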
 */
-case OP_Rewind: {        /* jump */
+case OP_Rewind: {        /* jump, ncycle */
   VdbeCursor *pC;
   BtCursor *pCrsr;
   int res;
 
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
   assert( pOp->p5==0 );
+  assert( pOp->p2>=0 && pOp->p2<p->nOp );
+
   pC = p->apCsr[pOp->p1];
   assert( pC!=0 );
   assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );
@@ -94746,9 +99348,10 @@ case OP_Rewind: {        /* jump */
   }
   if( rc ) goto abort_due_to_error;
   pC->nullRow = (u8)res;
-  assert( pOp->p2>0 && pOp->p2<p->nOp );
-  VdbeBranchTaken(res!=0,2);
-  if( res ) goto jump_to_p2;
+  if( pOp->p2>0 ){
+    VdbeBranchTaken(res!=0,2);
+    if( res ) goto jump_to_p2;
+  }
   break;
 }
 
@@ -94814,9 +99417,11 @@ case OP_SorterNext: {  /* jump */
   rc = sqlite3VdbeSorterNext(db, pC);
   goto next_tail;
 
-case OP_Prev:          /* jump */
+case OP_Prev:          /* jump, ncycle */
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
-  assert( pOp->p5<ArraySize(p->aCounter) );
+  assert( pOp->p5==0
       || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
       || pOp->p5==SQLITE_STMTSTATUS_AUTOINDEX);
   pC = p->apCsr[pOp->p1];
   assert( pC!=0 );
   assert( pC->deferredMoveto==0 );
@@ -94827,9 +99432,11 @@ case OP_Prev:          /* jump */
   rc = sqlite3BtreePrevious(pC->uc.pCursor, pOp->p3);
   goto next_tail;
 
-case OP_Next:          /* jump */
+case OP_Next:          /* jump, ncycle */
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
-  assert( pOp->p5<ArraySize(p->aCounter) );
+  assert( pOp->p5==0
       || pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
       || pOp->p5==SQLITE_STMTSTATUS_AUTOINDEX);
   pC = p->apCsr[pOp->p1];
   assert( pC!=0 );
   assert( pC->deferredMoveto==0 );
@@ -95017,8 +99624,8 @@ case OP_IdxDelete: {
 **
 ** See also: Rowid, MakeRecord.
 */
-case OP_DeferredSeek:
-case OP_IdxRowid: {           /* out2 */
+case OP_DeferredSeek:         /* ncycle */
+case OP_IdxRowid: {           /* out2, ncycle */
   VdbeCursor *pC;             /* The P1 index cursor */
   VdbeCursor *pTabCur;        /* The P2 table cursor (OP_DeferredSeek only) */
   i64 rowid;                  /* Rowid that P1 current points to */
@@ -95036,10 +99643,10 @@ case OP_IdxRowid: {           /* out2 */
   ** of sqlite3VdbeCursorRestore() and sqlite3VdbeIdxRowid(). */
   rc = sqlite3VdbeCursorRestore(pC);
 
-  /* sqlite3VbeCursorRestore() can only fail if the record has been deleted
-  ** out from under the cursor.  That will never happens for an IdxRowid
-  ** or Seek opcode */
-  if( NEVER(rc!=SQLITE_OK) ) goto abort_due_to_error;
+  /* sqlite3VdbeCursorRestore() may fail if the cursor has been disturbed
+  ** since it was last positioned and an error (e.g. OOM or an IO error)
+  ** occurs while trying to reposition it. */
+  if( rc!=SQLITE_OK ) goto abort_due_to_error;
 
   if( !pC->nullRow ){
     rowid = 0;  /* Not needed.  Only used to silence a warning. */
@@ -95080,8 +99687,8 @@ case OP_IdxRowid: {           /* out2 */
 ** seek operation now, without further delay.  If the cursor seek has
 ** already occurred, this instruction is a no-op.
 */
-case OP_FinishSeek: {
-  VdbeCursor *pC;             /* The P1 index cursor */
+case OP_FinishSeek: {        /* ncycle */
+  VdbeCursor *pC;            /* The P1 index cursor */
 
   assert( pOp->p1>=0 && pOp->p1<p->nCursor );
   pC = p->apCsr[pOp->p1];
@@ -95136,10 +99743,10 @@ case OP_FinishSeek: {
 ** If the P1 index entry is less than or equal to the key value then jump
 ** to P2.  Otherwise fall through to the next instruction.
 */
-case OP_IdxLE:          /* jump */
-case OP_IdxGT:          /* jump */
-case OP_IdxLT:          /* jump */
-case OP_IdxGE:  {       /* jump */
+case OP_IdxLE:          /* jump, ncycle */
+case OP_IdxGT:          /* jump, ncycle */
+case OP_IdxLT:          /* jump, ncycle */
+case OP_IdxGE:  {       /* jump, ncycle */
   VdbeCursor *pC;
   int res;
   UnpackedRecord r;
@@ -95216,7 +99823,7 @@ case OP_IdxGE: {        /* jump */
 ** file is given by P1.
 **
 ** The table being destroyed is in the main database file if P3==0.  If
-** P3==1 then the table to be clear is in the auxiliary database file
+** P3==1 then the table to be destroyed is in the auxiliary database file
 ** that is used to store tables created using CREATE TEMPORARY TABLE.
 **
 ** If AUTOVACUUM is enabled then it is possible that another root page
@@ -95276,8 +99883,8 @@ case OP_Destroy: {     /* out2 */
 ** in the database file is given by P1.  But, unlike Destroy, do not
 ** remove the table or index from the database file.
 **
-** The table being clear is in the main database file if P2==0.  If
-** P2==1 then the table to be clear is in the auxiliary database file
+** The table being cleared is in the main database file if P2==0.  If
+** P2==1 then the table to be cleared is in the auxiliary database file
 ** that is used to store tables created using CREATE TEMPORARY TABLE.
 **
 ** If the P3 value is non-zero, then the row change count is incremented
@@ -95363,13 +99970,41 @@ case OP_CreateBtree: {          /* out2 */
 
 /* Opcode: SqlExec * * * P4 *
 **
 ** Run the SQL statement or statements specified in the P4 string.
+** Disable Auth and Trace callbacks while those statements are running if
+** P1 is true.
 */
 case OP_SqlExec: {
+  char *zErr;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  sqlite3_xauth xAuth;
+#endif
+  u8 mTrace;
+
   sqlite3VdbeIncrWriteCounter(p, 0);
   db->nSqlExec++;
-  rc = sqlite3_exec(db, pOp->p4.z, 0, 0, 0);
+  zErr = 0;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  xAuth = db->xAuth;
+#endif
+  mTrace = db->mTrace;
+  if( pOp->p1 ){
+#ifndef SQLITE_OMIT_AUTHORIZATION
+    db->xAuth = 0;
+#endif
+    db->mTrace = 0;
+  }
+  rc = sqlite3_exec(db, pOp->p4.z, 0, 0, &zErr);
   db->nSqlExec--;
-  if( rc ) goto abort_due_to_error;
+#ifndef SQLITE_OMIT_AUTHORIZATION
+  db->xAuth = xAuth;
+#endif
+  db->mTrace = mTrace;
+  if( zErr || rc ){
+    sqlite3VdbeError(p, "%s", zErr);
+    sqlite3_free(zErr);
+    if( rc==SQLITE_NOMEM ) goto no_mem;
+    goto abort_due_to_error;
+  }
   break;
 }
 
@@ -95550,13 +100185,14 @@ case OP_IntegrityCk: {
   pIn1 = &aMem[pOp->p1];
   assert( pOp->p5<db->nDb );
   assert( DbMaskTest(p->btreeMask, pOp->p5) );
-  z = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
-                                 (int)pnErr->u.i+1, &nErr);
+  rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
+                                  (int)pnErr->u.i+1, &nErr, &z);
   sqlite3VdbeMemSetNull(pIn1);
   if( nErr==0 ){
     assert( z==0 );
-  }else if( z==0 ){
-    goto no_mem;
+  }else if( rc ){
+    sqlite3_free(z);
+    goto abort_due_to_error;
   }else{
     pnErr->u.i -= nErr-1;
     sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free);
@@ -95760,9 +100396,6 @@ case OP_Program: {        /* jump */
     pFrame->aOp = p->aOp;
     pFrame->nOp = p->nOp;
     pFrame->token = pProgram->token;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-    pFrame->anExec = p->anExec;
-#endif
 #ifdef SQLITE_DEBUG
     pFrame->iFrameMagic = SQLITE_FRAME_MAGIC;
 #endif
@@ -95799,9 +100432,6 @@ case OP_Program: {        /* jump */
   memset(pFrame->aOnce, 0, (pProgram->nOp + 7)/8);
   p->aOp = aOp = pProgram->aOp;
   p->nOp = pProgram->nOp;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
-  p->anExec = 0;
-#endif
 #ifdef SQLITE_DEBUG
   /* Verify that second and subsequent executions of the same trigger do not
   ** try to reuse register values from the first use. */
@@ -95941,7 +100571,7 @@ case OP_IfPos: {        /* jump, in1 */
 ** Synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)
 **
 ** This opcode performs a commonly used computation associated with
-** LIMIT and OFFSET process.  r[P1] holds the limit counter.  r[P3]
+** LIMIT and OFFSET processing.  r[P1] holds the limit counter.  r[P3]
The opcode computes the combined value ** of the LIMIT and OFFSET and stores that value in r[P2]. The r[P2] ** value computed is the total number of rows that will need to be @@ -96108,7 +100738,7 @@ case OP_AggStep1: { /* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. The next block of code ** checks to see if the register array has changed, and if so it - ** reinitializes the relavant parts of the sqlite3_context object */ + ** reinitializes the relevant parts of the sqlite3_context object */ if( pCtx->pMem != pMem ){ pCtx->pMem = pMem; for(i=pCtx->argc-1; i>=0; i--) pCtx->argv[i] = &aMem[pOp->p2+i]; @@ -96203,6 +100833,7 @@ case OP_AggFinal: { } sqlite3VdbeChangeEncoding(pMem, encoding); UPDATE_MAX_BLOBSIZE(pMem); + REGISTER_TRACE((int)(pMem-aMem), pMem); break; } @@ -96558,7 +101189,7 @@ case OP_VDestroy: { ** P1 is a cursor number. This opcode opens a cursor to the virtual ** table and stores that cursor in P1. */ -case OP_VOpen: { +case OP_VOpen: { /* ncycle */ VdbeCursor *pCur; sqlite3_vtab_cursor *pVCur; sqlite3_vtab *pVtab; @@ -96594,6 +101225,52 @@ case OP_VOpen: { } #endif /* SQLITE_OMIT_VIRTUALTABLE */ +#ifndef SQLITE_OMIT_VIRTUALTABLE +/* Opcode: VCheck P1 P2 P3 P4 * +** +** P4 is a pointer to a Table object that is a virtual table in schema P1 +** that supports the xIntegrity() method. This opcode runs the xIntegrity() +** method for that virtual table, using P3 as the integer argument. If +** an error is reported back, the table name is prepended to the error +** message and that message is stored in P2. If no errors are seen, +** register P2 is set to NULL. +*/ +case OP_VCheck: { /* out2 */ + Table *pTab; + sqlite3_vtab *pVtab; + const sqlite3_module *pModule; + char *zErr = 0; + + pOut = &aMem[pOp->p2]; + sqlite3VdbeMemSetNull(pOut); /* Innocent until proven guilty */ + assert( pOp->p4type==P4_TABLEREF ); + pTab = pOp->p4.pTab; + assert( pTab!=0 ); + assert( pTab->nTabRef>0 ); + assert( IsVirtual(pTab) ); + if( pTab->u.vtab.p==0 ) break; + pVtab = pTab->u.vtab.p->pVtab; + assert( pVtab!=0 ); + pModule = pVtab->pModule; + assert( pModule!=0 ); + assert( pModule->iVersion>=4 ); + assert( pModule->xIntegrity!=0 ); + sqlite3VtabLock(pTab->u.vtab.p); + assert( pOp->p1>=0 && pOp->p1nDb ); + rc = pModule->xIntegrity(pVtab, db->aDb[pOp->p1].zDbSName, pTab->zName, + pOp->p3, &zErr); + sqlite3VtabUnlock(pTab->u.vtab.p); + if( rc ){ + sqlite3_free(zErr); + goto abort_due_to_error; + } + if( zErr ){ + sqlite3VdbeMemSetStr(pOut, zErr, -1, SQLITE_UTF8, sqlite3_free); + } + break; +} +#endif /* SQLITE_OMIT_VIRTUALTABLE */ + #ifndef SQLITE_OMIT_VIRTUALTABLE /* Opcode: VInitIn P1 P2 P3 * * ** Synopsis: r[P2]=ValueList(P1,P3) @@ -96605,7 +101282,7 @@ case OP_VOpen: { ** cursor. Register P3 is used to hold the values returned by ** sqlite3_vtab_in_first() and sqlite3_vtab_in_next(). 
*/ -case OP_VInitIn: { /* out2 */ +case OP_VInitIn: { /* out2, ncycle */ VdbeCursor *pC; /* The cursor containing the RHS values */ ValueList *pRhs; /* New ValueList object to put in reg[P2] */ @@ -96616,7 +101293,7 @@ case OP_VInitIn: { /* out2 */ pRhs->pOut = &aMem[pOp->p3]; pOut = out2Prerelease(p, pOp); pOut->flags = MEM_Null; - sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3_free); + sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3VdbeValueListFree); break; } #endif /* SQLITE_OMIT_VIRTUALTABLE */ @@ -96642,7 +101319,7 @@ case OP_VInitIn: { /* out2 */ ** ** A jump is made to P2 if the result set after filtering would be empty. */ -case OP_VFilter: { /* jump */ +case OP_VFilter: { /* jump, ncycle */ int nArg; int iQuery; const sqlite3_module *pModule; @@ -96702,11 +101379,12 @@ case OP_VFilter: { /* jump */ ** bits (OPFLAG_LENGTHARG or OPFLAG_TYPEOFARG) but those bits are ** unused by OP_VColumn. */ -case OP_VColumn: { +case OP_VColumn: { /* ncycle */ sqlite3_vtab *pVtab; const sqlite3_module *pModule; Mem *pDest; sqlite3_context sContext; + FuncDef nullFunc; VdbeCursor *pCur = p->apCsr[pOp->p1]; assert( pCur!=0 ); @@ -96724,6 +101402,9 @@ case OP_VColumn: { memset(&sContext, 0, sizeof(sContext)); sContext.pOut = pDest; sContext.enc = encoding; + nullFunc.pUserData = 0; + nullFunc.funcFlags = SQLITE_RESULT_SUBTYPE; + sContext.pFunc = &nullFunc; assert( pOp->p5==OPFLAG_NOCHNG || pOp->p5==0 ); if( pOp->p5 & OPFLAG_NOCHNG ){ sqlite3VdbeMemSetNull(pDest); @@ -96754,7 +101435,7 @@ case OP_VColumn: { ** jump to instruction P2. Or, if the virtual table has reached ** the end of its result set, then fall through to the next instruction. */ -case OP_VNext: { /* jump */ +case OP_VNext: { /* jump, ncycle */ sqlite3_vtab *pVtab; const sqlite3_module *pModule; int res; @@ -96985,7 +101666,7 @@ case OP_MaxPgcnt: { /* out2 */ ** This opcode works exactly like OP_Function. The only difference is in ** its name. This opcode is used in places where the function must be ** purely non-deterministic. Some built-in date/time functions can be -** either determinitic of non-deterministic, depending on their arguments. +** either deterministic of non-deterministic, depending on their arguments. ** When those function are used in a non-deterministic way, they will check ** to see if they were called using OP_PureFunc instead of OP_Function, and ** if they were, they throw an error. @@ -97003,7 +101684,7 @@ case OP_Function: { /* group */ /* If this function is inside of a trigger, the register array in aMem[] ** might change from one evaluation to the next. The next block of code ** checks to see if the register array has changed, and if so it - ** reinitializes the relavant parts of the sqlite3_context object */ + ** reinitializes the relevant parts of the sqlite3_context object */ pOut = &aMem[pOp->p3]; if( pCtx->pOut != pOut ){ pCtx->pVdbe = p; @@ -97056,6 +101737,42 @@ case OP_ClrSubtype: { /* in1 */ break; } +/* Opcode: GetSubtype P1 P2 * * * +** Synopsis: r[P2] = r[P1].subtype +** +** Extract the subtype value from register P1 and write that subtype +** into register P2. If P1 has no subtype, then P1 gets a NULL. +*/ +case OP_GetSubtype: { /* in1 out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + if( pIn1->flags & MEM_Subtype ){ + sqlite3VdbeMemSetInt64(pOut, pIn1->eSubtype); + }else{ + sqlite3VdbeMemSetNull(pOut); + } + break; +} + +/* Opcode: SetSubtype P1 P2 * * * +** Synopsis: r[P2].subtype = r[P1] +** +** Set the subtype value of register P2 to the integer from register P1. 
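
The GetSubtype/SetSubtype pair introduced above lets the engine move a value's subtype between registers explicitly. At the application level the same machinery is reached through sqlite3_value_subtype()/sqlite3_result_subtype(); the pass-through function below is a sketch, and it assumes a recent SQLite in which the SQLITE_SUBTYPE and SQLITE_RESULT_SUBTYPE function flags are available (SQLITE_RESULT_SUBTYPE is the flag consulted by the nullFunc fix in OP_VColumn above).

#include "sqlite3.h"

/* Pass a value through unchanged, preserving its subtype.  A function
** should declare SQLITE_SUBTYPE to inspect argument subtypes and
** SQLITE_RESULT_SUBTYPE to emit one; otherwise the core is free to
** drop the information. */
static void tagFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc;
  sqlite3_result_value(ctx, argv[0]);
  sqlite3_result_subtype(ctx, sqlite3_value_subtype(argv[0]));
}

static int register_tag(sqlite3 *db){
  return sqlite3_create_function_v2(db, "tag", 1,
      SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE,
      0, tagFunc, 0, 0, 0);
}
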
+** If P1 is NULL, clear the subtype from p2. +*/ +case OP_SetSubtype: { /* in1 out2 */ + pIn1 = &aMem[pOp->p1]; + pOut = &aMem[pOp->p2]; + if( pIn1->flags & MEM_Null ){ + pOut->flags &= ~MEM_Subtype; + }else{ + assert( pIn1->flags & MEM_Int ); + pOut->flags |= MEM_Subtype; + pOut->eSubtype = (u8)(pIn1->u.i & 0xff); + } + break; +} + /* Opcode: FilterAdd P1 * P3 P4 * ** Synopsis: filter(P1) += key(P3@P4) ** @@ -97079,7 +101796,7 @@ case OP_FilterAdd: { printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); } #endif - h %= pIn1->n; + h %= (pIn1->n*8); pIn1->z[h/8] |= 1<<(h&7); break; } @@ -97115,7 +101832,7 @@ case OP_Filter: { /* jump */ printf("hash: %llu modulo %d -> %u\n", h, pIn1->n, (int)(h%pIn1->n)); } #endif - h %= pIn1->n; + h %= (pIn1->n*8); if( (pIn1->z[h/8] & (1<<(h&7)))==0 ){ VdbeBranchTaken(1, 2); p->aCounter[SQLITE_STMTSTATUS_FILTER_HIT]++; @@ -97337,11 +102054,13 @@ default: { /* This is really OP_Noop, OP_Explain */ *****************************************************************************/ } -#ifdef VDBE_PROFILE - { - u64 endTime = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); - if( endTime>start ) pOrigOp->cycles += endTime - start; - pOrigOp->cnt++; +#if defined(VDBE_PROFILE) + *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); + pnCycle = 0; +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( pnCycle ){ + *pnCycle += sqlite3Hwtime(); + pnCycle = 0; } #endif @@ -97365,7 +102084,7 @@ default: { /* This is really OP_Noop, OP_Explain */ } if( opProperty==0xff ){ /* Never happens. This code exists to avoid a harmless linkage - ** warning aboud sqlite3VdbeRegisterDump() being defined but not + ** warning about sqlite3VdbeRegisterDump() being defined but not ** used. */ sqlite3VdbeRegisterDump(p); } @@ -97418,6 +102137,18 @@ default: { /* This is really OP_Noop, OP_Explain */ ** release the mutexes on btrees that were acquired at the ** top. */ vdbe_return: +#if defined(VDBE_PROFILE) + if( pnCycle ){ + *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime(); + pnCycle = 0; + } +#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS) + if( pnCycle ){ + *pnCycle += sqlite3Hwtime(); + pnCycle = 0; + } +#endif + #ifndef SQLITE_OMIT_PROGRESS_CALLBACK while( nVmStep>=nProgressLimit && db->xProgress!=0 ){ nProgressLimit += db->nProgressOps; @@ -97429,7 +102160,9 @@ default: { /* This is really OP_Noop, OP_Explain */ } #endif p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep; - sqlite3VdbeLeave(p); + if( DbMaskNonZero(p->lockMask) ){ + sqlite3VdbeLeave(p); + } assert( rc!=SQLITE_OK || nExtraDelete==0 || sqlite3_strlike("DELETE%",p->zSql,0)!=0 ); @@ -97524,8 +102257,7 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){ /* Set the value of register r[1] in the SQL statement to integer iRow. ** This is done directly as a performance optimization */ - v->aMem[1].flags = MEM_Int; - v->aMem[1].u.i = iRow; + sqlite3VdbeMemSetInt64(&v->aMem[1], iRow); /* If the statement has been run before (and is paused at the OP_ResultRow) ** then back it up to the point where it does the OP_NotExists. 
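
The two `h %= (pIn1->n*8)` changes above fix the Bloom-filter bit addressing: pIn1->n is the blob size in bytes, so the filter holds n*8 bits, and reducing the hash modulo the byte count left seven eighths of the bitmap permanently unset. Because OP_FilterAdd and OP_Filter used the same modulus, results were still correct — Bloom filters fail soft — but the false-positive rate was needlessly high. A standalone sketch of the corrected addressing:

#include <stdint.h>

/* An n-byte blob used as a Bloom filter holds n*8 usable bits. */
static void filter_add(uint8_t *z, uint64_t n, uint64_t h){
  h %= n*8;
  z[h/8] |= (uint8_t)(1 << (h&7));
}
static int filter_maybe_contains(const uint8_t *z, uint64_t n, uint64_t h){
  h %= n*8;
  return (z[h/8] >> (h&7)) & 1;   /* 0 => definitely absent */
}
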
This could @@ -97608,7 +102340,7 @@ SQLITE_API int sqlite3_blob_open( #endif *ppBlob = 0; #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) || zTable==0 ){ + if( !sqlite3SafetyCheckOk(db) || zTable==0 || zColumn==0 ){ return SQLITE_MISUSE_BKPT; } #endif @@ -97807,7 +102539,7 @@ SQLITE_API int sqlite3_blob_open( if( pBlob && pBlob->pStmt ) sqlite3VdbeFinalize((Vdbe *)pBlob->pStmt); sqlite3DbFree(db, pBlob); } - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); sqlite3ParseObjectReset(&sParse); rc = sqlite3ApiExit(db, rc); @@ -97966,7 +102698,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ ((Vdbe*)p->pStmt)->rc = SQLITE_OK; rc = blobSeekToRow(p, iRow, &zErr); if( rc!=SQLITE_OK ){ - sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : 0), zErr); + sqlite3ErrorWithMsg(db, rc, (zErr ? "%s" : (char*)0), zErr); sqlite3DbFree(db, zErr); } assert( rc!=SQLITE_SCHEMA ); @@ -98069,7 +102801,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ ** The threshold for the amount of main memory to use before flushing ** records to a PMA is roughly the same as the limit configured for the ** page-cache of the main database. Specifically, the threshold is set to -** the value returned by "PRAGMA main.page_size" multipled by +** the value returned by "PRAGMA main.page_size" multiplied by ** that returned by "PRAGMA main.cache_size", in bytes. ** ** If the sorter is running in single-threaded mode, then all PMAs generated @@ -98092,7 +102824,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *pBlob, sqlite3_int64 iRow){ ** ** If there are fewer than SORTER_MAX_MERGE_COUNT PMAs in total and the ** sorter is running in single-threaded mode, then these PMAs are merged -** incrementally as keys are retreived from the sorter by the VDBE. The +** incrementally as keys are retrieved from the sorter by the VDBE. The ** MergeEngine object, described in further detail below, performs this ** merge. ** @@ -98170,7 +102902,7 @@ struct SorterFile { struct SorterList { SorterRecord *pList; /* Linked list of records */ u8 *aMemory; /* If non-NULL, bulk memory to hold pList */ - int szPMA; /* Size of pList as PMA in bytes */ + i64 szPMA; /* Size of pList as PMA in bytes */ }; /* @@ -98255,7 +102987,7 @@ struct MergeEngine { ** ** Essentially, this structure contains all those fields of the VdbeSorter ** structure for which each thread requires a separate instance. For example, -** each thread requries its own UnpackedRecord object to unpack records in +** each thread requeries its own UnpackedRecord object to unpack records in ** as part of comparison operations. ** ** Before a background thread is launched, variable bDone is set to 0. Then, @@ -98279,10 +103011,10 @@ typedef int (*SorterCompare)(SortSubtask*,int*,const void*,int,const void*,int); struct SortSubtask { SQLiteThread *pThread; /* Background thread, if any */ int bDone; /* Set if thread is finished but not joined */ + int nPMA; /* Number of PMAs currently in file */ VdbeSorter *pSorter; /* Sorter that owns this sub-task */ UnpackedRecord *pUnpacked; /* Space to unpack a record */ SorterList list; /* List for thread to write to a PMA */ - int nPMA; /* Number of PMAs currently in file */ SorterCompare xCompare; /* Compare function to use */ SorterFile file; /* Temp file for level-0 PMAs */ SorterFile file2; /* Space for other PMAs */ @@ -98327,7 +103059,7 @@ struct VdbeSorter { ** PMA, in sorted order. 
The next key to be read is cached in nKey/aKey. ** aKey might point into aMap or into aBuffer. If neither of those locations ** contain a contiguous representation of the key, then aAlloc is allocated -** and the key is copied into aAlloc and aKey is made to poitn to aAlloc. +** and the key is copied into aAlloc and aKey is made to point to aAlloc. ** ** pFd==0 at EOF. */ @@ -99698,7 +104430,7 @@ static int vdbeSorterFlushPMA(VdbeSorter *pSorter){ ** the background thread from a sub-tasks previous turn is still running, ** skip it. If the first (pSorter->nTask-1) sub-tasks are all still busy, ** fall back to using the final sub-task. The first (pSorter->nTask-1) - ** sub-tasks are prefered as they use background threads - the final + ** sub-tasks are preferred as they use background threads - the final ** sub-task uses the main thread. */ for(i=0; iiPrev + i + 1) % nWorker; @@ -99756,8 +104488,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterWrite( int rc = SQLITE_OK; /* Return Code */ SorterRecord *pNew; /* New list element */ int bFlush; /* True to flush contents of memory to PMA */ - int nReq; /* Bytes of memory required */ - int nPMA; /* Bytes of PMA space required */ + i64 nReq; /* Bytes of memory required */ + i64 nPMA; /* Bytes of PMA space required */ int t; /* serial type of first record field */ assert( pCsr->eCurType==CURTYPE_SORTER ); @@ -100182,7 +104914,7 @@ static int vdbePmaReaderIncrMergeInit(PmaReader *pReadr, int eMode){ rc = vdbeMergeEngineInit(pTask, pIncr->pMerger, eMode); - /* Set up the required files for pIncr. A multi-theaded IncrMerge object + /* Set up the required files for pIncr. A multi-threaded IncrMerge object ** requires two temp files to itself, whereas a single-threaded object ** only requires a region of pTask->file2. */ if( rc==SQLITE_OK ){ @@ -100822,6 +105554,8 @@ static int bytecodevtabConnect( "p5 INT," "comment TEXT," "subprog TEXT," + "nexec INT," + "ncycle INT," "stmt HIDDEN" ");", @@ -100836,6 +105570,9 @@ static int bytecodevtabConnect( ");" }; + (void)argc; + (void)argv; + (void)pzErr; rc = sqlite3_declare_vtab(db, azSchema[isTabUsed]); if( rc==SQLITE_OK ){ pNew = sqlite3_malloc( sizeof(*pNew) ); @@ -100981,7 +105718,7 @@ static int bytecodevtabColumn( } } } - i += 10; + i += 20; } } switch( i ){ @@ -101031,16 +105768,31 @@ static int bytecodevtabColumn( } break; } - case 10: /* tables_used.type */ + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + case 9: /* nexec */ + sqlite3_result_int(ctx, pOp->nExec); + break; + case 10: /* ncycle */ + sqlite3_result_int(ctx, pOp->nCycle); + break; +#else + case 9: /* nexec */ + case 10: /* ncycle */ + sqlite3_result_int(ctx, 0); + break; +#endif + + case 20: /* tables_used.type */ sqlite3_result_text(ctx, pCur->zType, -1, SQLITE_STATIC); break; - case 11: /* tables_used.schema */ + case 21: /* tables_used.schema */ sqlite3_result_text(ctx, pCur->zSchema, -1, SQLITE_STATIC); break; - case 12: /* tables_used.name */ + case 22: /* tables_used.name */ sqlite3_result_text(ctx, pCur->zName, -1, SQLITE_STATIC); break; - case 13: /* tables_used.wr */ + case 23: /* tables_used.wr */ sqlite3_result_int(ctx, pOp->opcode==OP_OpenWrite); break; } @@ -101071,6 +105823,7 @@ static int bytecodevtabFilter( bytecodevtab_cursor *pCur = (bytecodevtab_cursor *)pVtabCursor; bytecodevtab *pVTab = (bytecodevtab *)pVtabCursor->pVtab; int rc = SQLITE_OK; + (void)idxStr; bytecodevtabCursorClear(pCur); pCur->iRowid = 0; @@ -101113,7 +105866,7 @@ static int bytecodevtabBestIndex( int rc = SQLITE_CONSTRAINT; struct sqlite3_index_constraint *p; 
bytecodevtab *pVTab = (bytecodevtab*)tab; - int iBaseCol = pVTab->bTablesUsed ? 4 : 8; + int iBaseCol = pVTab->bTablesUsed ? 4 : 10; pIdxInfo->estimatedCost = (double)100; pIdxInfo->estimatedRows = 100; pIdxInfo->idxNum = 0; @@ -101160,7 +105913,8 @@ static sqlite3_module bytecodevtabModule = { /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ 0, - /* xShadowName */ 0 + /* xShadowName */ 0, + /* xIntegrity */ 0 }; @@ -101539,6 +106293,8 @@ SQLITE_PRIVATE int sqlite3JournalOpen( ){ MemJournal *p = (MemJournal*)pJfd; + assert( zName || nSpill<0 || (flags & SQLITE_OPEN_EXCLUSIVE) ); + /* Zero the file-handle object. If nSpill was passed zero, initialize ** it using the sqlite3OsOpen() function of the underlying VFS. In this ** case none of the code in this module is executed as a result of calls @@ -101805,8 +106561,13 @@ sqlite3_mutex* sqlcipher_mutex(int); /* #include "pager.h" */ /* #include "vdbeInt.h" */ -#ifdef __ANDROID__ +#if !defined(SQLCIPHER_OMIT_LOG_DEVICE) +#if defined(__ANDROID__) #include +#elif defined(__APPLE__) +/* #include */ +#include +#endif #endif /* #include */ @@ -101849,7 +106610,7 @@ SQLITE_API void sqlite3pager_reset(Pager *pPager); #define CIPHER_STR(s) #s #ifndef CIPHER_VERSION_NUMBER -#define CIPHER_VERSION_NUMBER 4.5.3 +#define CIPHER_VERSION_NUMBER 4.6.0 #endif #ifndef CIPHER_VERSION_BUILD @@ -101867,10 +106628,17 @@ SQLITE_API void sqlite3pager_reset(Pager *pPager); #define PBKDF2_ITER 256000 #endif -/* possible flags for cipher_ctx->flags */ -#define CIPHER_FLAG_HMAC 0x01 -#define CIPHER_FLAG_LE_PGNO 0x02 -#define CIPHER_FLAG_BE_PGNO 0x04 +#define SQLCIPHER_FLAG_GET(FLAG,BIT) ((FLAG & BIT) != 0) +#define SQLCIPHER_FLAG_SET(FLAG,BIT) FLAG |= BIT +#define SQLCIPHER_FLAG_UNSET(FLAG,BIT) FLAG &= ~BIT + +/* possible flags for codec_ctx->flags */ +#define CIPHER_FLAG_HMAC (1 << 0) +#define CIPHER_FLAG_LE_PGNO (1 << 1) +#define CIPHER_FLAG_BE_PGNO (1 << 2) +#define CIPHER_FLAG_KEY_USED (1 << 3) +#define CIPHER_FLAG_HAS_KDF_SALT (1 << 4) + #ifndef DEFAULT_CIPHER_FLAGS #define DEFAULT_CIPHER_FLAGS CIPHER_FLAG_HMAC | CIPHER_FLAG_LE_PGNO @@ -101979,8 +106747,6 @@ typedef struct { int plaintext_header_sz; int hmac_algorithm; int kdf_algorithm; - unsigned int skip_read_hmac; - unsigned int need_kdf_salt; unsigned int flags; unsigned char *kdf_salt; unsigned char *hmac_kdf_salt; @@ -102052,10 +106818,6 @@ unsigned char sqlcipher_get_hmac_salt_mask(void); int sqlcipher_codec_ctx_set_use_hmac(codec_ctx *ctx, int use); int sqlcipher_codec_ctx_get_use_hmac(codec_ctx *ctx); -int sqlcipher_codec_ctx_set_flag(codec_ctx *ctx, unsigned int flag); -int sqlcipher_codec_ctx_unset_flag(codec_ctx *ctx, unsigned int flag); -int sqlcipher_codec_ctx_get_flag(codec_ctx *ctx, unsigned int flag); - const char* sqlcipher_codec_get_cipher_provider(codec_ctx *ctx); int sqlcipher_codec_ctx_migrate(codec_ctx *ctx); int sqlcipher_codec_add_random(codec_ctx *ctx, const char *data, int random_sz); @@ -102090,7 +106852,6 @@ int sqlcipher_codec_ctx_integrity_check(codec_ctx *, Parse *, char *); int sqlcipher_set_log(const char *destination); void sqlcipher_set_log_level(unsigned int level); -void sqlcipher_log(unsigned int tag, const char *message, ...); #define SQLCIPHER_LOG_NONE 0x00 #define SQLCIPHER_LOG_ERROR 0x01 @@ -102100,6 +106861,12 @@ void sqlcipher_log(unsigned int tag, const char *message, ...); #define SQLCIPHER_LOG_TRACE 0x10 #define SQLCIPHER_LOG_ALL 0xffffffff +#ifdef SQLCIPHER_OMIT_LOG +#define sqlcipher_log(tag, message, ...) 
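
The empty variadic macro above compiles every sqlcipher_log() call out of existence when SQLCIPHER_OMIT_LOG is defined: no format strings are evaluated and no logging code is linked. The same pattern in isolation (names here are made up; the ## token-paste form is a GNU/Clang extension):

#include <stdio.h>

#ifdef LOG_OMITTED
# define my_log(level, fmt, ...)        /* expands to nothing */
#else
# define my_log(level, fmt, ...) \
    fprintf(stderr, "[%d] " fmt "\n", (level), ##__VA_ARGS__)
#endif

/* Callers are unchanged either way, but note that the argument
** expressions vanish along with the call, so they must be free of
** side effects:  my_log(1, "opened %s", path); */
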
+#else +void sqlcipher_log(unsigned int tag, const char *message, ...); +#endif + void sqlcipher_vdbe_return_string(Parse*, const char*, const char*, int); #ifdef CODEC_DEBUG_PAGEDATA @@ -102122,6 +106889,7 @@ void sqlcipher_vdbe_return_string(Parse*, const char*, const char*, int); #endif /* END SQLCIPHER */ + /************** End of crypto.h **********************************************/ /************** Continuing where we left off in crypto.c *********************/ @@ -102204,13 +106972,13 @@ int sqlcipher_codec_pragma(sqlite3* db, int iDb, Parse *pParse, const char *zLef if( zRight ) { unsigned int flags = sqlcipher_get_test_flags(); if(sqlite3_stricmp(zRight, "fail_encrypt")==0) { - flags |= TEST_FAIL_ENCRYPT; + SQLCIPHER_FLAG_SET(flags,TEST_FAIL_ENCRYPT); } else if(sqlite3_stricmp(zRight, "fail_decrypt")==0) { - flags |= TEST_FAIL_DECRYPT; + SQLCIPHER_FLAG_SET(flags,TEST_FAIL_DECRYPT); } else if(sqlite3_stricmp(zRight, "fail_migrate")==0) { - flags |= TEST_FAIL_MIGRATE; + SQLCIPHER_FLAG_SET(flags,TEST_FAIL_MIGRATE); } sqlcipher_set_test_flags(flags); } @@ -102219,13 +106987,13 @@ int sqlcipher_codec_pragma(sqlite3* db, int iDb, Parse *pParse, const char *zLef if( zRight ) { unsigned int flags = sqlcipher_get_test_flags(); if(sqlite3_stricmp(zRight, "fail_encrypt")==0) { - flags &= ~TEST_FAIL_ENCRYPT; + SQLCIPHER_FLAG_UNSET(flags,TEST_FAIL_ENCRYPT); } else if(sqlite3_stricmp(zRight, "fail_decrypt")==0) { - flags &= ~TEST_FAIL_DECRYPT; + SQLCIPHER_FLAG_UNSET(flags,TEST_FAIL_DECRYPT); } else if(sqlite3_stricmp(zRight, "fail_migrate")==0) { - flags &= ~TEST_FAIL_MIGRATE; + SQLCIPHER_FLAG_UNSET(flags,TEST_FAIL_MIGRATE); } sqlcipher_set_test_flags(flags); } @@ -102350,7 +107118,12 @@ int sqlcipher_codec_pragma(sqlite3* db, int iDb, Parse *pParse, const char *zLef sqlcipher_vdbe_return_string(pParse, "rekey_kdf_iter", message, P4_TRANSIENT); sqlite3_log(SQLITE_WARNING, message); }else - if( sqlite3_stricmp(zLeft,"cipher_page_size")==0 ){ + if( sqlite3_stricmp(zLeft,"page_size")==0 || sqlite3_stricmp(zLeft,"cipher_page_size")==0 ){ + /* PRAGMA cipher_page_size will alter the size of the database pages while ensuring that the + required reserve space is allocated at the end of each page. This will also override the + standard SQLite PRAGMA page_size behavior if a codec context is attached to the database handle. + If PRAGMA page_size is invoked but a codec context is not attached (i.e. dealing with a standard + unencrypted database) then return early and allow the standard PRAGMA page_size logic to apply. 
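
In caller terms, the unification described above means that on a keyed SQLCipher connection PRAGMA page_size and PRAGMA cipher_page_size now behave identically, while on a plaintext database the statement falls through to stock SQLite. A sketch of the intended usage, assuming the SQLCipher build's sqlite3_key() API; the page size must be set before the first read or write of the database:

#include <string.h>
#include "sqlite3.h"

static int open_keyed(const char *zFile, const char *zKey, sqlite3 **pDb){
  int rc = sqlite3_open(zFile, pDb);
  if( rc==SQLITE_OK ) rc = sqlite3_key(*pDb, zKey, (int)strlen(zKey));
  if( rc==SQLITE_OK ){
    /* equivalent to PRAGMA page_size on an encrypted handle */
    rc = sqlite3_exec(*pDb, "PRAGMA cipher_page_size = 8192;", 0, 0, 0);
  }
  return rc;
}
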
*/ if(ctx) { if( zRight ) { int size = atoi(zRight); @@ -102362,6 +107135,8 @@ int sqlcipher_codec_pragma(sqlite3* db, int iDb, Parse *pParse, const char *zLef char * page_size = sqlite3_mprintf("%d", sqlcipher_codec_ctx_get_pagesize(ctx)); sqlcipher_vdbe_return_string(pParse, "cipher_page_size", page_size, P4_DYNAMIC); } + } else { + return 0; /* return early so that the PragTyp_PAGE_SIZE case logic in pragma.c will take effect */ } }else if( sqlite3_stricmp(zLeft,"cipher_default_page_size")==0 ){ @@ -102400,22 +107175,22 @@ int sqlcipher_codec_pragma(sqlite3* db, int iDb, Parse *pParse, const char *zLef char *deprecation = "PRAGMA cipher_hmac_pgno is deprecated, please remove from use"; /* clear both pgno endian flags */ if(sqlite3_stricmp(zRight, "le") == 0) { - sqlcipher_codec_ctx_unset_flag(ctx, CIPHER_FLAG_BE_PGNO); - sqlcipher_codec_ctx_set_flag(ctx, CIPHER_FLAG_LE_PGNO); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_BE_PGNO); + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_LE_PGNO); } else if(sqlite3_stricmp(zRight, "be") == 0) { - sqlcipher_codec_ctx_unset_flag(ctx, CIPHER_FLAG_LE_PGNO); - sqlcipher_codec_ctx_set_flag(ctx, CIPHER_FLAG_BE_PGNO); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_LE_PGNO); + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_BE_PGNO); } else if(sqlite3_stricmp(zRight, "native") == 0) { - sqlcipher_codec_ctx_unset_flag(ctx, CIPHER_FLAG_LE_PGNO); - sqlcipher_codec_ctx_unset_flag(ctx, CIPHER_FLAG_BE_PGNO); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_LE_PGNO); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_BE_PGNO); } sqlcipher_vdbe_return_string(pParse, "cipher_hmac_pgno", deprecation, P4_TRANSIENT); sqlite3_log(SQLITE_WARNING, deprecation); } else { - if(sqlcipher_codec_ctx_get_flag(ctx, CIPHER_FLAG_LE_PGNO)) { + if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_LE_PGNO)) { sqlcipher_vdbe_return_string(pParse, "cipher_hmac_pgno", "le", P4_TRANSIENT); - } else if(sqlcipher_codec_ctx_get_flag(ctx, CIPHER_FLAG_BE_PGNO)) { + } else if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_BE_PGNO)) { sqlcipher_vdbe_return_string(pParse, "cipher_hmac_pgno", "be", P4_TRANSIENT); } else { sqlcipher_vdbe_return_string(pParse, "cipher_hmac_pgno", "native", P4_TRANSIENT); @@ -102851,7 +107626,7 @@ static void* sqlite3Codec(void *iCtx, void *data, Pgno pgno, int mode) { #ifdef SQLCIPHER_TEST if((sqlcipher_get_test_flags() & TEST_FAIL_DECRYPT) > 0 && sqlcipher_get_test_fail()) { rc = SQLITE_ERROR; - sqlcipher_log(SQLCIPHER_LOG_ERROR, "simulating decryption failure for pgno=%d, mode=%d, page_sz=%d\n", pgno, mode, page_sz); + sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlite3Codec: simulating decryption failure for pgno=%d, mode=%d, page_sz=%d\n", pgno, mode, page_sz); } #endif if(rc != SQLITE_OK) { @@ -102860,6 +107635,8 @@ static void* sqlite3Codec(void *iCtx, void *data, Pgno pgno, int mode) { sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3Codec: error decrypting page %d data: %d", pgno, rc); sqlcipher_memset((unsigned char*) buffer+offset, 0, page_sz-offset); sqlcipher_codec_ctx_set_error(ctx, rc); + } else { + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_KEY_USED); } memcpy(pData, buffer, page_sz); /* copy buffer data back to pData and return */ return pData; @@ -102883,7 +107660,7 @@ static void* sqlite3Codec(void *iCtx, void *data, Pgno pgno, int mode) { #ifdef SQLCIPHER_TEST if((sqlcipher_get_test_flags() & TEST_FAIL_ENCRYPT) > 0 && sqlcipher_get_test_fail()) { rc = SQLITE_ERROR; - sqlcipher_log(SQLCIPHER_LOG_ERROR, "simulating encryption failure for pgno=%d, mode=%d, page_sz=%d\n", pgno, mode, 
page_sz); + sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlite3Codec: simulating encryption failure for pgno=%d, mode=%d, page_sz=%d\n", pgno, mode, page_sz); } #endif if(rc != SQLITE_OK) { @@ -102894,6 +107671,7 @@ static void* sqlite3Codec(void *iCtx, void *data, Pgno pgno, int mode) { sqlcipher_codec_ctx_set_error(ctx, rc); return NULL; } + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_KEY_USED); return buffer; /* return persistent buffer data, pData remains intact */ break; @@ -102923,6 +107701,14 @@ int sqlcipherCodecAttach(sqlite3* db, int nDb, const void *zKey, int nKey) { sqlite3_file *fd; codec_ctx *ctx; + ctx = (codec_ctx*) sqlcipherPagerGetCodec(pDb->pBt->pBt->pPager); + + if(ctx != NULL && SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_KEY_USED)) { + /* there is already a codec attached to this database, so we should not proceed */ + sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlcipherCodecAttach: no codec attached to db"); + return SQLITE_OK; + } + /* check if the sqlite3_file is open, and if not force handle to NULL */ if((fd = sqlite3PagerFile(pPager))->pMethods == 0) fd = NULL; @@ -102946,7 +107732,7 @@ int sqlcipherCodecAttach(sqlite3* db, int nDb, const void *zKey, int nKey) { if(rc != SQLITE_OK) { /* initialization failed, do not attach potentially corrupted context */ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipherCodecAttach: context initialization failed forcing error state with rc=%d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipherCodecAttach: context initialization failed, forcing error state with rc=%d", rc); /* force an error at the pager level, such that even the upstream caller ignores the return code the pager will be in an error state and will process no further operations */ sqlite3pager_error(pPager, rc); @@ -103013,7 +107799,7 @@ SQLITE_API int sqlite3_key_v2(sqlite3 *db, const char *zDb, const void *pKey, in int db_index = sqlcipher_find_db_index(db, zDb); return sqlcipherCodecAttach(db, db_index, pKey, nKey); } - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_key_v2: no key provided"); + sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlite3_key_v2: no key provided"); return SQLITE_ERROR; } @@ -103049,8 +107835,8 @@ SQLITE_API int sqlite3_rekey_v2(sqlite3 *db, const char *zDb, const void *pKey, if(ctx == NULL) { /* there was no codec attached to this database, so this should do nothing! 
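
The behavioral change beginning here is caller-visible: sqlite3_rekey_v2() on a handle that was never keyed previously logged an error but returned SQLITE_OK; it now returns SQLITE_MISUSE. A sketch of how a caller might distinguish the cases — rekey changes the key of an already-encrypted database, while encrypting a plaintext one goes through sqlcipher_export():

#include "sqlite3.h"

static int change_key(sqlite3 *db, const void *pNew, int nNew){
  int rc = sqlite3_rekey_v2(db, "main", pNew, nNew);
  if( rc==SQLITE_MISUSE ){
    /* not an encrypted database: rekey is the wrong tool here */
  }
  return rc;
}
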
*/ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: no codec attached to db, exiting"); - return SQLITE_OK; + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: no codec attached to db %s: rekey can't be used on an unencrypted database", zDb); + return SQLITE_MISUSE; } sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlite3_rekey_v2: entering database mutex %p", db->mutex); @@ -103078,7 +107864,7 @@ SQLITE_API int sqlite3_rekey_v2(sqlite3 *db, const char *zDb, const void *pKey, sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: error %d occurred writing page %d", rc, pgno); } } else { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: error %d occurred getting page %d", rc, pgno); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: error %d occurred reading page %d", rc, pgno); } } } @@ -103099,7 +107885,7 @@ SQLITE_API int sqlite3_rekey_v2(sqlite3 *db, const char *zDb, const void *pKey, } return SQLITE_OK; } - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: no key provided"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlite3_rekey_v2: no key provided for db %s: rekey can't be used to decrypt an encrypted database", zDb); return SQLITE_ERROR; } @@ -103431,8 +108217,9 @@ static sqlcipher_provider *default_provider = NULL; static sqlite3_mutex* sqlcipher_static_mutex[SQLCIPHER_MUTEX_COUNT]; static FILE* sqlcipher_log_file = NULL; -static volatile int sqlcipher_log_logcat = 0; +static volatile int sqlcipher_log_device = 0; static volatile unsigned int sqlcipher_log_level = SQLCIPHER_LOG_NONE; +static volatile int sqlcipher_log_set = 0; sqlite3_mutex* sqlcipher_mutex(int mutex) { if(mutex < 0 || mutex >= SQLCIPHER_MUTEX_COUNT) return NULL; @@ -103555,6 +108342,19 @@ void sqlcipher_activate() { for(i = 0; i < SQLCIPHER_MUTEX_COUNT; i++) { sqlcipher_static_mutex[i] = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); } +#ifndef SQLCIPHER_OMIT_DEFAULT_LOGGING + /* when sqlcipher is first activated, set a default log target and level of WARN. Use the "device log" + for android (logcat) or apple (console). Use stderr on all other platforms. */ + if(!sqlcipher_log_set) { + sqlcipher_log_level = SQLCIPHER_LOG_WARN; +#if defined(__ANDROID__) || defined(__APPLE_) + sqlcipher_log_device = 1; +#else + sqlcipher_log_file = stderr; +#endif + sqlcipher_log_set = 1; + } +#endif } /* check to see if there is a provider registered at this point @@ -103637,7 +108437,7 @@ void sqlcipher_deactivate() { Note: As suggested by Joachim Schipper (joachim.schipper@fox-it.com) */ void* sqlcipher_memset(void *v, unsigned char value, sqlite_uint64 len) { - sqlite_uint64 i = 0; + volatile sqlite_uint64 i = 0; volatile unsigned char *a = v; if (v == NULL) return v; @@ -103654,8 +108454,8 @@ void* sqlcipher_memset(void *v, unsigned char value, sqlite_uint64 len) { matches a single value (i.e. the memory is all zeros) returns 0 if match, 1 of no match */ int sqlcipher_ismemset(const void *v, unsigned char value, sqlite_uint64 len) { - const unsigned char *a = v; - sqlite_uint64 i = 0, result = 0; + const volatile unsigned char *a = v; + volatile sqlite_uint64 i = 0, result = 0; for(i = 0; i < len; i++) { result |= a[i] ^ value; @@ -103667,8 +108467,8 @@ int sqlcipher_ismemset(const void *v, unsigned char value, sqlite_uint64 len) { /* constant time memory comparison routine. 
returns 0 if match, 1 if no match */ int sqlcipher_memcmp(const void *v0, const void *v1, int len) { - const unsigned char *a0 = v0, *a1 = v1; - int i = 0, result = 0; + const volatile unsigned char *a0 = v0, *a1 = v1; + volatile int i = 0, result = 0; for(i = 0; i < len; i++) { result |= a0[i] ^ a1[i]; @@ -103686,18 +108486,20 @@ void sqlcipher_mlock(void *ptr, sqlite_uint64 sz) { if(ptr == NULL || sz == 0) return; - sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mem_lock: calling mlock(%p,%lu); _SC_PAGESIZE=%lu", ptr - offset, sz + offset, pagesize); + sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mlock: calling mlock(%p,%lu); _SC_PAGESIZE=%lu", ptr - offset, sz + offset, pagesize); rc = mlock(ptr - offset, sz + offset); if(rc!=0) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mem_lock: mlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mlock: mlock() returned %d errno=%d", rc, errno); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_mlock: mlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno); } #elif defined(_WIN32) #if !(defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP || WINAPI_FAMILY == WINAPI_FAMILY_APP)) int rc; - sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mem_lock: calling VirtualLock(%p,%d)", ptr, sz); + sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mlock: calling VirtualLock(%p,%d)", ptr, sz); rc = VirtualLock(ptr, sz); if(rc==0) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mem_lock: VirtualLock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError()); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mlock: VirtualLock() returned %d LastError=%d", rc, GetLastError()); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_mlock: VirtualLock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError()); } #endif #endif @@ -103713,18 +108515,25 @@ void sqlcipher_munlock(void *ptr, sqlite_uint64 sz) { if(ptr == NULL || sz == 0) return; - sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mem_unlock: calling munlock(%p,%lu)", ptr - offset, sz + offset); + sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_munlock: calling munlock(%p,%lu)", ptr - offset, sz + offset); rc = munlock(ptr - offset, sz + offset); if(rc!=0) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mem_unlock: munlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_munlock: munlock(%p,%lu) returned %d errno=%d", ptr - offset, sz + offset, rc, errno); } #elif defined(_WIN32) #if !(defined(WINAPI_FAMILY) && (WINAPI_FAMILY == WINAPI_FAMILY_PHONE_APP || WINAPI_FAMILY == WINAPI_FAMILY_APP)) int rc; - sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_mem_lock: calling VirtualUnlock(%p,%d)", ptr, sz); + + if(ptr == NULL || sz == 0) return; + + sqlcipher_log(SQLCIPHER_LOG_TRACE, "sqlcipher_munlock: calling VirtualUnlock(%p,%d)", ptr, sz); rc = VirtualUnlock(ptr, sz); + + /* because memory allocations may be made from the same individual page, it is possible for VirtualUnlock to be called + * multiple times for the same page. Subsequent calls will return an error, but this can be safely ignored (i.e. because + * the previous call for that page unlocked the memory already). Log an info level event only in that case. 
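
The volatile qualifiers added through this region serve one purpose: keeping the wipe and comparison loops honest under optimization. A mismatch-position-independent comparison in the same style is sketched below; real hardening also depends on the compiler and target, so treat this as illustrative.

#include <stddef.h>

/* XOR-accumulate every byte so the running time does not leak the
** index of the first mismatch; volatile discourages the compiler
** from short-circuiting or eliding the loop. */
static int ct_memcmp(const void *v0, const void *v1, size_t len){
  const volatile unsigned char *a0 = v0, *a1 = v1;
  volatile unsigned char diff = 0;
  size_t i;
  for(i=0; i<len; i++) diff |= a0[i] ^ a1[i];
  return diff != 0;   /* 0 = equal, 1 = different */
}
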
*/ if(!rc) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_mem_unlock: VirtualUnlock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError()); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_munlock: VirtualUnlock(%p,%d) returned %d LastError=%d", ptr, sz, rc, GetLastError()); } #endif #endif @@ -103802,10 +108611,10 @@ static int sqlcipher_cipher_ctx_init(codec_ctx *ctx, cipher_ctx **iCtx) { static void sqlcipher_cipher_ctx_free(codec_ctx* ctx, cipher_ctx **iCtx) { cipher_ctx *c_ctx = *iCtx; sqlcipher_log(SQLCIPHER_LOG_DEBUG, "cipher_ctx_free: iCtx=%p", iCtx); - sqlcipher_free(c_ctx->key, ctx->key_sz); - sqlcipher_free(c_ctx->hmac_key, ctx->key_sz); - sqlcipher_free(c_ctx->pass, c_ctx->pass_sz); - sqlcipher_free(c_ctx->keyspec, ctx->keyspec_sz); + if(c_ctx->key) sqlcipher_free(c_ctx->key, ctx->key_sz); + if(c_ctx->hmac_key) sqlcipher_free(c_ctx->hmac_key, ctx->key_sz); + if(c_ctx->pass) sqlcipher_free(c_ctx->pass, c_ctx->pass_sz); + if(c_ctx->keyspec) sqlcipher_free(c_ctx->keyspec, ctx->keyspec_sz); sqlcipher_free(c_ctx, sizeof(cipher_ctx)); } @@ -103819,8 +108628,10 @@ static int sqlcipher_codec_ctx_reserve_setup(codec_ctx *ctx) { reserve += ctx->hmac_sz; /* if reserve will include hmac, update that size */ /* calculate the amount of reserve needed in even increments of the cipher block size */ - reserve = ((reserve % ctx->block_sz) == 0) ? reserve : + if(ctx->block_sz > 0) { + reserve = ((reserve % ctx->block_sz) == 0) ? reserve : ((reserve / ctx->block_sz) + 1) * ctx->block_sz; + } sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_codec_ctx_reserve_setup: base_reserve=%d block_sz=%d md_size=%d reserve=%d", base_reserve, ctx->block_sz, ctx->hmac_sz, reserve); @@ -103874,8 +108685,8 @@ static int sqlcipher_cipher_ctx_copy(codec_ctx *ctx, cipher_ctx *target, cipher_ void *hmac_key = target->hmac_key; sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_cipher_ctx_copy: target=%p, source=%p", target, source); - sqlcipher_free(target->pass, target->pass_sz); - sqlcipher_free(target->keyspec, ctx->keyspec_sz); + if(target->pass) sqlcipher_free(target->pass, target->pass_sz); + if(target->keyspec) sqlcipher_free(target->keyspec, ctx->keyspec_sz); memcpy(target, source, sizeof(cipher_ctx)); target->key = key; /* restore pointer to previously allocated key data */ @@ -103905,7 +108716,7 @@ static int sqlcipher_cipher_ctx_copy(codec_ctx *ctx, cipher_ctx *target, cipher_ */ static int sqlcipher_cipher_ctx_set_keyspec(codec_ctx *ctx, cipher_ctx *c_ctx, const unsigned char *key) { /* free, zero existing pointers and size */ - sqlcipher_free(c_ctx->keyspec, ctx->keyspec_sz); + if(c_ctx->keyspec) sqlcipher_free(c_ctx->keyspec, ctx->keyspec_sz); c_ctx->keyspec = NULL; c_ctx->keyspec = sqlcipher_malloc(ctx->keyspec_sz); @@ -103933,8 +108744,8 @@ void sqlcipher_codec_get_pass(codec_ctx *ctx, void **zKey, int *nKey) { } static void sqlcipher_set_derive_key(codec_ctx *ctx, int derive) { - if(ctx->read_ctx != NULL) ctx->read_ctx->derive_key = 1; - if(ctx->write_ctx != NULL) ctx->write_ctx->derive_key = 1; + if(ctx->read_ctx != NULL) ctx->read_ctx->derive_key = derive; + if(ctx->write_ctx != NULL) ctx->write_ctx->derive_key = derive; } /** @@ -103945,7 +108756,7 @@ static void sqlcipher_set_derive_key(codec_ctx *ctx, int derive) { */ static int sqlcipher_cipher_ctx_set_pass(cipher_ctx *ctx, const void *zKey, int nKey) { /* free, zero existing pointers and size */ - sqlcipher_free(ctx->pass, ctx->pass_sz); + if(ctx->pass) sqlcipher_free(ctx->pass, ctx->pass_sz); ctx->pass = NULL; ctx->pass_sz = 0; @@ -104014,12 
+108825,12 @@ int sqlcipher_codec_ctx_get_fast_kdf_iter(codec_ctx *ctx) { /* set the global default flag for HMAC */ void sqlcipher_set_default_use_hmac(int use) { - if(use) default_flags |= CIPHER_FLAG_HMAC; - else default_flags &= ~CIPHER_FLAG_HMAC; + if(use) SQLCIPHER_FLAG_SET(default_flags, CIPHER_FLAG_HMAC); + else SQLCIPHER_FLAG_UNSET(default_flags,CIPHER_FLAG_HMAC); } int sqlcipher_get_default_use_hmac() { - return (default_flags & CIPHER_FLAG_HMAC) != 0; + return SQLCIPHER_FLAG_GET(default_flags, CIPHER_FLAG_HMAC); } void sqlcipher_set_hmac_salt_mask(unsigned char mask) { @@ -104033,16 +108844,16 @@ unsigned char sqlcipher_get_hmac_salt_mask() { /* set the codec flag for whether this individual database should be using hmac */ int sqlcipher_codec_ctx_set_use_hmac(codec_ctx *ctx, int use) { if(use) { - sqlcipher_codec_ctx_set_flag(ctx, CIPHER_FLAG_HMAC); + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_HMAC); } else { - sqlcipher_codec_ctx_unset_flag(ctx, CIPHER_FLAG_HMAC); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_HMAC); } return sqlcipher_codec_ctx_reserve_setup(ctx); } int sqlcipher_codec_ctx_get_use_hmac(codec_ctx *ctx) { - return (ctx->flags & CIPHER_FLAG_HMAC) != 0; + return SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HMAC); } /* the length of plaintext header size must be: @@ -104056,7 +108867,7 @@ int sqlcipher_set_default_plaintext_header_size(int size) { } int sqlcipher_codec_ctx_set_plaintext_header_size(codec_ctx *ctx, int size) { - if(size >= 0 && (size % ctx->block_sz) == 0 && size < (ctx->page_sz - ctx->reserve_sz)) { + if(size >= 0 && ctx->block_sz > 0 && (size % ctx->block_sz) == 0 && size < (ctx->page_sz - ctx->reserve_sz)) { ctx->plaintext_header_sz = size; return SQLITE_OK; } @@ -104111,22 +108922,8 @@ int sqlcipher_codec_ctx_get_kdf_algorithm(codec_ctx *ctx) { return ctx->kdf_algorithm; } -int sqlcipher_codec_ctx_set_flag(codec_ctx *ctx, unsigned int flag) { - ctx->flags |= flag; - return SQLITE_OK; -} - -int sqlcipher_codec_ctx_unset_flag(codec_ctx *ctx, unsigned int flag) { - ctx->flags &= ~flag; - return SQLITE_OK; -} - -int sqlcipher_codec_ctx_get_flag(codec_ctx *ctx, unsigned int flag) { - return (ctx->flags & flag) != 0; -} - void sqlcipher_codec_ctx_set_error(codec_ctx *ctx, int error) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_set_error: ctx=%p, error=%d", ctx, error); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_set_error %d", error); sqlite3pager_error(ctx->pBt->pBt->pPager, error); ctx->pBt->pBt->db->errCode = error; } @@ -104142,7 +108939,7 @@ void* sqlcipher_codec_ctx_get_data(codec_ctx *ctx) { static int sqlcipher_codec_ctx_init_kdf_salt(codec_ctx *ctx) { sqlite3_file *fd = sqlite3PagerFile(ctx->pBt->pBt->pPager); - if(!ctx->need_kdf_salt) { + if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HAS_KDF_SALT)) { return SQLITE_OK; /* don't reload salt when not needed */ } @@ -104155,14 +108952,14 @@ static int sqlcipher_codec_ctx_init_kdf_salt(codec_ctx *ctx) { return SQLITE_ERROR; } } - ctx->need_kdf_salt = 0; + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_HAS_KDF_SALT); return SQLITE_OK; } int sqlcipher_codec_ctx_set_kdf_salt(codec_ctx *ctx, unsigned char *salt, int size) { if(size >= ctx->kdf_salt_sz) { memcpy(ctx->kdf_salt, salt, ctx->kdf_salt_sz); - ctx->need_kdf_salt = 0; + SQLCIPHER_FLAG_SET(ctx->flags, CIPHER_FLAG_HAS_KDF_SALT); return SQLITE_OK; } sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_set_kdf_salt: attempt to set salt of incorrect size %d", size); @@ -104171,7 +108968,7 @@ int 
sqlcipher_codec_ctx_set_kdf_salt(codec_ctx *ctx, unsigned char *salt, int si int sqlcipher_codec_ctx_get_kdf_salt(codec_ctx *ctx, void** salt) { int rc = SQLITE_OK; - if(ctx->need_kdf_salt) { + if(!SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HAS_KDF_SALT)) { if((rc = sqlcipher_codec_ctx_init_kdf_salt(ctx)) != SQLITE_OK) { sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_get_kdf_salt: error %d from sqlcipher_codec_ctx_init_kdf_salt", rc); } @@ -104192,7 +108989,7 @@ int sqlcipher_codec_ctx_set_pagesize(codec_ctx *ctx, int size) { return SQLITE_ERROR; } /* attempt to free the existing page buffer */ - sqlcipher_free(ctx->buffer,ctx->page_sz); + if(ctx->buffer) sqlcipher_free(ctx->buffer,ctx->page_sz); ctx->page_sz = size; /* pre-allocate a page buffer of PageSize bytes. This will @@ -104264,9 +109061,6 @@ int sqlcipher_codec_ctx_init(codec_ctx **iCtx, Db *pDb, Pager *pPager, const voi /* setup default flags */ ctx->flags = default_flags; - /* defer attempt to read KDF salt until first use */ - ctx->need_kdf_salt = 1; - /* setup the crypto provider */ sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_codec_ctx_init: allocating provider"); ctx->provider = (sqlcipher_provider *) sqlcipher_malloc(sizeof(sqlcipher_provider)); @@ -104326,8 +109120,8 @@ int sqlcipher_codec_ctx_init(codec_ctx **iCtx, Db *pDb, Pager *pPager, const voi /* Note that use_hmac is a special case that requires recalculation of page size so we call set_use_hmac to perform setup */ - if((rc = sqlcipher_codec_ctx_set_use_hmac(ctx, default_flags & CIPHER_FLAG_HMAC)) != SQLITE_OK) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_init: error %d setting use_hmac %d", rc, default_flags & CIPHER_FLAG_HMAC); + if((rc = sqlcipher_codec_ctx_set_use_hmac(ctx, SQLCIPHER_FLAG_GET(default_flags, CIPHER_FLAG_HMAC))) != SQLITE_OK) { + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_init: error %d setting use_hmac %d", rc, SQLCIPHER_FLAG_GET(default_flags, CIPHER_FLAG_HMAC)); return rc; } @@ -104374,12 +109168,14 @@ int sqlcipher_codec_ctx_init(codec_ctx **iCtx, Db *pDb, Pager *pPager, const voi void sqlcipher_codec_ctx_free(codec_ctx **iCtx) { codec_ctx *ctx = *iCtx; sqlcipher_log(SQLCIPHER_LOG_DEBUG, "codec_ctx_free: iCtx=%p", iCtx); - sqlcipher_free(ctx->kdf_salt, ctx->kdf_salt_sz); - sqlcipher_free(ctx->hmac_kdf_salt, ctx->kdf_salt_sz); - sqlcipher_free(ctx->buffer, ctx->page_sz); + if(ctx->kdf_salt) sqlcipher_free(ctx->kdf_salt, ctx->kdf_salt_sz); + if(ctx->hmac_kdf_salt) sqlcipher_free(ctx->hmac_kdf_salt, ctx->kdf_salt_sz); + if(ctx->buffer) sqlcipher_free(ctx->buffer, ctx->page_sz); - ctx->provider->ctx_free(&ctx->provider_ctx); - sqlcipher_free(ctx->provider, sizeof(sqlcipher_provider)); + if(ctx->provider) { + ctx->provider->ctx_free(&ctx->provider_ctx); + sqlcipher_free(ctx->provider, sizeof(sqlcipher_provider)); + } sqlcipher_cipher_ctx_free(ctx, &ctx->read_ctx); sqlcipher_cipher_ctx_free(ctx, &ctx->write_ctx); @@ -104404,9 +109200,9 @@ static int sqlcipher_page_hmac(codec_ctx *ctx, cipher_ctx *c_ctx, Pgno pgno, uns backwards compatibility on the most popular platforms, but can optionally be configured to use either big endian or native byte ordering via pragma. 
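
On the little-endian page-number encoding referred to above: the four bytes fed into the page HMAC are fixed little-endian by default, which is what keeps encrypted databases portable between little- and big-endian hosts. A standalone equivalent of that serialization step:

static void put4byte_le(unsigned char *p, unsigned int v){
  p[0] = (unsigned char)(v & 0xff);
  p[1] = (unsigned char)((v >> 8) & 0xff);
  p[2] = (unsigned char)((v >> 16) & 0xff);
  p[3] = (unsigned char)((v >> 24) & 0xff);
}
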
*/ - if(ctx->flags & CIPHER_FLAG_LE_PGNO) { /* compute hmac using little endian pgno*/ + if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_LE_PGNO)) { /* compute hmac using little endian pgno*/ sqlcipher_put4byte_le(pgno_raw, pgno); - } else if(ctx->flags & CIPHER_FLAG_BE_PGNO) { /* compute hmac using big endian pgno */ + } else if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_BE_PGNO)) { /* compute hmac using big endian pgno */ sqlite3Put4byte(pgno_raw, pgno); /* sqlite3Put4byte converts 32bit uint to big endian */ } else { /* use native byte ordering */ memcpy(pgno_raw, &pgno, sizeof(pgno)); @@ -104462,7 +109258,7 @@ int sqlcipher_page_cipher(codec_ctx *ctx, int for_ctx, Pgno pgno, int mode, int memcpy(iv_out, iv_in, ctx->iv_sz); /* copy the iv from the input to output buffer */ } - if((ctx->flags & CIPHER_FLAG_HMAC) && (mode == CIPHER_DECRYPT) && !ctx->skip_read_hmac) { + if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HMAC) && (mode == CIPHER_DECRYPT)) { if(sqlcipher_page_hmac(ctx, c_ctx, pgno, in, size + ctx->iv_sz, hmac_out) != SQLITE_OK) { sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: hmac operation on decrypt failed for pgno=%d", pgno); goto error; @@ -104475,25 +109271,25 @@ int sqlcipher_page_cipher(codec_ctx *ctx, int for_ctx, Pgno pgno, int mode, int resulted from a short read (i.e. sqlite attempted to pull a page after the end of the file. these short read failures must be ignored for autovaccum mode to work so wipe the output buffer and return SQLITE_OK to skip the decryption step. */ - sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlcipher_page_cipher: zeroed page (short read) for pgno %d, encryption but returning SQLITE_OK", pgno); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_page_cipher: zeroed page (short read) for pgno %d with autovacuum enabled", pgno); sqlcipher_memset(out, 0, page_sz); return SQLITE_OK; } else { /* if the page memory is not all zeros, it means the there was data and a hmac on the page. since the check failed, the page was either tampered with or corrupted. 
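
The branch being reworded here implements a two-way decision on HMAC mismatch, roughly as sketched below. This is a simplification using the sqlcipher_ismemset()/sqlcipher_memset() helpers defined in this file; the real code also accounts for the IV and reserve region.

static int on_hmac_mismatch(const unsigned char *in, unsigned char *out,
                            int page_sz){
  if( sqlcipher_ismemset(in, 0, page_sz)==0 ){
    /* page is all zeros: a short read past EOF under autovacuum,
    ** not corruption - hand back a zeroed page and report success */
    sqlcipher_memset(out, 0, page_sz);
    return SQLITE_OK;
  }
  /* real ciphertext with a bad HMAC: tampered with or corrupted */
  return SQLITE_ERROR;
}
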
wipe the output buffer, and return SQLITE_ERROR to the caller */ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: hmac check failed for pgno=%d returning SQLITE_ERROR", pgno); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: hmac check failed for pgno=%d", pgno); goto error; } } } if(ctx->provider->cipher(ctx->provider_ctx, mode, c_ctx->key, ctx->key_sz, iv_out, in, size, out) != SQLITE_OK) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: cipher operation mode=%d failed for pgno=%d returning SQLITE_ERROR", mode, pgno); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: cipher operation mode=%d failed for pgno=%d", mode, pgno); goto error; }; - if((ctx->flags & CIPHER_FLAG_HMAC) && (mode == CIPHER_ENCRYPT)) { + if(SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HMAC) && (mode == CIPHER_ENCRYPT)) { if(sqlcipher_page_hmac(ctx, c_ctx, pgno, out_start, size + ctx->iv_sz, hmac_out) != SQLITE_OK) { sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_page_cipher: hmac operation on encrypt failed for pgno=%d", pgno); goto error; @@ -104531,7 +109327,7 @@ static int sqlcipher_cipher_ctx_key_derive(codec_ctx *ctx, cipher_ctx *c_ctx) { if(c_ctx->pass && c_ctx->pass_sz) { /* if key material is present on the context for derivation */ /* if necessary, initialize the salt from the header or random source */ - if(ctx->need_kdf_salt) { + if(!SQLCIPHER_FLAG_GET(ctx->flags, CIPHER_FLAG_HAS_KDF_SALT)) { if((rc = sqlcipher_codec_ctx_init_kdf_salt(ctx)) != SQLITE_OK) { sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_cipher_ctx_key_derive: error %d from sqlcipher_codec_ctx_init_kdf_salt", rc); return rc; @@ -104541,19 +109337,19 @@ static int sqlcipher_cipher_ctx_key_derive(codec_ctx *ctx, cipher_ctx *c_ctx) { if (c_ctx->pass_sz == ((ctx->key_sz * 2) + 3) && sqlite3StrNICmp((const char *)c_ctx->pass ,"x'", 2) == 0 && cipher_isHex(c_ctx->pass + 2, ctx->key_sz * 2)) { int n = c_ctx->pass_sz - 3; /* adjust for leading x' and tailing ' */ const unsigned char *z = c_ctx->pass + 2; /* adjust lead offset of x' */ - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "cipher_ctx_key_derive: using raw key from hex"); + sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_cipher_ctx_key_derive: using raw key from hex"); cipher_hex2bin(z, n, c_ctx->key); } else if (c_ctx->pass_sz == (((ctx->key_sz + ctx->kdf_salt_sz) * 2) + 3) && sqlite3StrNICmp((const char *)c_ctx->pass ,"x'", 2) == 0 && cipher_isHex(c_ctx->pass + 2, (ctx->key_sz + ctx->kdf_salt_sz) * 2)) { const unsigned char *z = c_ctx->pass + 2; /* adjust lead offset of x' */ - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "cipher_ctx_key_derive: using raw key from hex"); + sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_cipher_ctx_key_derive: using raw key from hex"); cipher_hex2bin(z, (ctx->key_sz * 2), c_ctx->key); cipher_hex2bin(z + (ctx->key_sz * 2), (ctx->kdf_salt_sz * 2), ctx->kdf_salt); } else { - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "cipher_ctx_key_derive: deriving key using full PBKDF2 with %d iterations", ctx->kdf_iter); + sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_cipher_ctx_key_derive: deriving key using full PBKDF2 with %d iterations", ctx->kdf_iter); if(ctx->provider->kdf(ctx->provider_ctx, ctx->kdf_algorithm, c_ctx->pass, c_ctx->pass_sz, ctx->kdf_salt, ctx->kdf_salt_sz, ctx->kdf_iter, ctx->key_sz, c_ctx->key) != SQLITE_OK) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "cipher_ctx_key_derive: error occurred from provider kdf generating encryption key"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_cipher_ctx_key_derive: error occurred from provider kdf 
generating encryption key"); return SQLITE_ERROR; } } @@ -104587,7 +109383,7 @@ static int sqlcipher_cipher_ctx_key_derive(codec_ctx *ctx, cipher_ctx *c_ctx) { if(ctx->provider->kdf(ctx->provider_ctx, ctx->kdf_algorithm, c_ctx->key, ctx->key_sz, ctx->hmac_kdf_salt, ctx->kdf_salt_sz, ctx->fast_kdf_iter, ctx->key_sz, c_ctx->hmac_key) != SQLITE_OK) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "cipher_ctx_key_derive: error occurred from provider kdf generating HMAC key"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_cipher_ctx_key_derive: error occurred from provider kdf generating HMAC key"); return SQLITE_ERROR; } } @@ -104595,7 +109391,7 @@ static int sqlcipher_cipher_ctx_key_derive(codec_ctx *ctx, cipher_ctx *c_ctx) { c_ctx->derive_key = 0; return SQLITE_OK; } - sqlcipher_log(SQLCIPHER_LOG_ERROR, "cipher_ctx_key_derive: key material is not present on the context for key derivation"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_cipher_ctx_key_derive: key material is not present on the context for key derivation"); return SQLITE_ERROR; } @@ -104758,7 +109554,7 @@ int sqlcipher_codec_ctx_integrity_check(codec_ctx *ctx, Parse *pParse, char *col } if(file_sz % ctx->page_sz != 0) { - result = sqlite3_mprintf("page %d has an invalid size of %lld bytes", page, file_sz - ((file_sz / ctx->page_sz) * ctx->page_sz)); + result = sqlite3_mprintf("page %d has an invalid size of %lld bytes (expected %d bytes)", page, file_sz - ((file_sz / ctx->page_sz) * ctx->page_sz), ctx->page_sz); sqlite3VdbeAddOp4(v, OP_String8, 0, 1, 0, result, P4_DYNAMIC); sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1); } @@ -104794,7 +109590,7 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { /* Version 4 - current, no upgrade required, so exit immediately */ rc = sqlcipher_check_connection(db_filename, pass, pass_sz, "", &user_version, &journal_mode); if(rc == SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "No upgrade required - exiting"); + sqlcipher_log(SQLCIPHER_LOG_INFO, "sqlcipher_codec_ctx_migrate: no upgrade required - exiting"); goto cleanup; } @@ -104802,7 +109598,7 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { pragma_compat = sqlite3_mprintf("PRAGMA cipher_compatibility = %d;", i); rc = sqlcipher_check_connection(db_filename, pass, pass_sz, pragma_compat, &user_version, &journal_mode); if(rc == SQLITE_OK) { - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "Version %d format found", i); + sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_codec_ctx_migrate: version %d format found", i); goto migrate; } if(pragma_compat) sqlcipher_free(pragma_compat, sqlite3Strlen30(pragma_compat)); @@ -104810,7 +109606,7 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { } /* if we exit the loop normally we failed to determine the version, this is an error */ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "Upgrade format not determined"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: unable to determine format version for upgrade: this may indicate custom settings were used "); goto handle_error; migrate: @@ -104827,55 +109623,55 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { rc = sqlite3_exec(db, pragma_compat, NULL, NULL, NULL); if(rc != SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "set compatibility mode failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: set compatibility mode failed, error code %d", rc); goto handle_error; } /* force journal mode to DELETE, we will set it back later if different */ rc = sqlite3_exec(db, "PRAGMA journal_mode = delete;", NULL, NULL, NULL); if(rc 
!= SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "force journal mode DELETE failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: force journal mode DELETE failed, error code %d", rc); goto handle_error; } rc = sqlite3_exec(db, attach_command, NULL, NULL, NULL); if(rc != SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "attach failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: attach failed, error code %d", rc); goto handle_error; } rc = sqlite3_key_v2(db, "migrate", pass, pass_sz); if(rc != SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "keying attached database failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: keying attached database failed, error code %d", rc); goto handle_error; } rc = sqlite3_exec(db, "SELECT sqlcipher_export('migrate');", NULL, NULL, NULL); if(rc != SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_export failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: sqlcipher_export failed, error code %d", rc); goto handle_error; } #ifdef SQLCIPHER_TEST if((sqlcipher_get_test_flags() & TEST_FAIL_MIGRATE) > 0) { rc = SQLITE_ERROR; - sqlcipher_log(SQLCIPHER_LOG_ERROR, "simulated migrate failure, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_WARN, "sqlcipher_codec_ctx_migrate: simulated migrate failure, error code %d", rc); goto handle_error; } #endif rc = sqlite3_exec(db, set_user_version, NULL, NULL, NULL); if(rc != SQLITE_OK){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "set user version failed, error code %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: set user version failed, error code %d", rc); goto handle_error; } if( !db->autoCommit ){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "cannot migrate from within a transaction"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: cannot migrate from within a transaction"); goto handle_error; } if( db->nVdbeActive>1 ){ - sqlcipher_log(SQLCIPHER_LOG_ERROR, "cannot migrate - SQL statements in progress"); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: cannot migrate - SQL statements in progress"); goto handle_error; } @@ -104891,6 +109687,7 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { if( rc!=SQLITE_OK ) goto handle_error; sqlcipherCodecGetKey(db, db->nDb - 1, (void**)&keyspec, &keyspec_sz); + SQLCIPHER_FLAG_UNSET(ctx->flags, CIPHER_FLAG_KEY_USED); sqlcipherCodecAttach(db, 0, keyspec, keyspec_sz); srcfile = sqlite3PagerFile(pSrc->pBt->pPager); @@ -104912,13 +109709,13 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { if(!MoveFileExW(w_migrated_db_filename, w_db_filename, MOVEFILE_REPLACE_EXISTING)) { rc = SQLITE_ERROR; - sqlcipher_log(SQLCIPHER_LOG_ERROR, "error occurred while renaming %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: error occurred while renaming migration files %d", rc); goto handle_error; } #else sqlcipher_log(SQLCIPHER_LOG_DEBUG, "performing POSIX rename"); if ((rc = rename(migrated_db_filename, db_filename)) != 0) { - sqlcipher_log(SQLCIPHER_LOG_ERROR, "error occurred while renaming %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: error occurred while renaming migration files %d", rc); goto handle_error; } #endif @@ -104950,12 +109747,12 @@ int sqlcipher_codec_ctx_migrate(codec_ctx *ctx) { goto cleanup; handle_error: - sqlcipher_log(SQLCIPHER_LOG_ERROR, "An error occurred attempting to migrate the 
database - last error %d", rc); + sqlcipher_log(SQLCIPHER_LOG_ERROR, "sqlcipher_codec_ctx_migrate: an error occurred attempting to migrate the database - last error %d", rc); cleanup: if(migrated_db_filename) { int del_rc = sqlite3OsDelete(db->pVfs, migrated_db_filename, 0); - sqlcipher_log(SQLCIPHER_LOG_DEBUG, "deleted migration database: %d", del_rc); + sqlcipher_log(SQLCIPHER_LOG_DEBUG, "sqlcipher_codec_ctx_migrate: deleted migration database: %d", del_rc); } if(pass) sqlcipher_free(pass, pass_sz); @@ -104996,16 +109793,24 @@ int sqlcipher_codec_add_random(codec_ctx *ctx, const char *zRight, int random_sz } #if !defined(SQLITE_OMIT_TRACE) + +#define SQLCIPHER_PROFILE_FMT "Elapsed time:%.3f ms - %s\n" +#define SQLCIPHER_PROFILE_FMT_OSLOG "Elapsed time:%{public}.3f ms - %{public}s\n" + static int sqlcipher_profile_callback(unsigned int trace, void *file, void *stmt, void *run_time){ FILE *f = (FILE*) file; - char *fmt = "Elapsed time:%.3f ms - %s\n"; double elapsed = (*((sqlite3_uint64*)run_time))/1000000.0; -#ifdef __ANDROID__ if(f == NULL) { - __android_log_print(ANDROID_LOG_DEBUG, "sqlcipher", fmt, elapsed, sqlite3_sql((sqlite3_stmt*)stmt)); - } +#if !defined(SQLCIPHER_OMIT_LOG_DEVICE) +#if defined(__ANDROID__) + __android_log_print(ANDROID_LOG_DEBUG, "sqlcipher", SQLCIPHER_PROFILE_FMT, elapsed, sqlite3_sql((sqlite3_stmt*)stmt)); +#elif defined(__APPLE__) + os_log(OS_LOG_DEFAULT, SQLCIPHER_PROFILE_FMT_OSLOG, elapsed, sqlite3_sql((sqlite3_stmt*)stmt)); #endif - if(f) fprintf(f, fmt, elapsed, sqlite3_sql((sqlite3_stmt*)stmt)); +#endif + } else { + fprintf(f, SQLCIPHER_PROFILE_FMT, elapsed, sqlite3_sql((sqlite3_stmt*)stmt)); + } return SQLITE_OK; } #endif @@ -105022,8 +109827,8 @@ int sqlcipher_cipher_profile(sqlite3 *db, const char *destination){ f = stdout; }else if(sqlite3_stricmp(destination, "stderr") == 0){ f = stderr; - }else if(sqlite3_stricmp(destination, "logcat") == 0){ - f = NULL; /* file pointer will be NULL indicating logcat on android */ + }else if(sqlite3_stricmp(destination, "logcat") == 0 || sqlite3_stricmp(destination, "device") == 0){ + f = NULL; /* file pointer will be NULL indicating the device target (i.e. logcat or oslog). We will accept logcat for backwards compatibility */ }else{ #if !defined(SQLCIPHER_PROFILE_USE_FOPEN) && (defined(_WIN32) && (__STDC_VERSION__ > 199901L) || defined(SQLITE_OS_WINRT)) if(fopen_s(&f, destination, "a") != 0) return SQLITE_ERROR; @@ -105052,20 +109857,49 @@ const char* sqlcipher_codec_get_provider_version(codec_ctx *ctx) { void sqlcipher_log(unsigned int level, const char *message, ...) 
{ va_list params; va_start(params, message); + char *formatted = NULL; #ifdef CODEC_DEBUG -#ifdef __ANDROID__ +#if defined(SQLCIPHER_OMIT_LOG_DEVICE) + vfprintf(stderr, message, params); + fprintf(stderr, "\n"); + goto end; +#else +#if defined(__ANDROID__) __android_log_vprint(ANDROID_LOG_DEBUG, "sqlcipher", message, params); + goto end; +#elif defined(__APPLE__) + formatted = sqlite3_vmprintf(message, params); + os_log(OS_LOG_DEFAULT, "%s", formatted); + sqlite3_free(formatted); + goto end; #else vfprintf(stderr, message, params); fprintf(stderr, "\n"); + goto end; +#endif #endif #endif - if(level > sqlcipher_log_level || (sqlcipher_log_logcat == 0 && sqlcipher_log_file == NULL)) { + if(level > sqlcipher_log_level || (sqlcipher_log_device == 0 && sqlcipher_log_file == NULL)) { /* no log target or tag not in included filters */ goto end; } + +#if !defined(SQLCIPHER_OMIT_LOG_DEVICE) + if(sqlcipher_log_device) { +#if defined(__ANDROID__) + __android_log_vprint(ANDROID_LOG_DEBUG, "sqlcipher", message, params); + goto end; +#elif defined(__APPLE__) + formatted = sqlite3_vmprintf(message, params); + os_log(OS_LOG_DEFAULT, "%{public}s", formatted); + sqlite3_free(formatted); + goto end; +#endif + } +#endif + if(sqlcipher_log_file != NULL){ char buffer[24]; struct tm tt; @@ -105090,13 +109924,10 @@ void sqlcipher_log(unsigned int level, const char *message, ...) { fprintf((FILE*)sqlcipher_log_file, "%s.%03d: ", buffer, ms); vfprintf((FILE*)sqlcipher_log_file, message, params); fprintf((FILE*)sqlcipher_log_file, "\n"); + goto end; } } -#ifdef __ANDROID__ - if(sqlcipher_log_logcat) { - __android_log_vprint(ANDROID_LOG_DEBUG, "sqlcipher", message, params); - } -#endif + end: va_end(params); } @@ -105116,10 +109947,11 @@ int sqlcipher_set_log(const char *destination){ fclose((FILE*)sqlcipher_log_file); } sqlcipher_log_file = NULL; - sqlcipher_log_logcat = 0; + sqlcipher_log_device = 0; - if(sqlite3_stricmp(destination, "logcat") == 0){ - sqlcipher_log_logcat = 1; + if(sqlite3_stricmp(destination, "logcat") == 0 || sqlite3_stricmp(destination, "device") == 0){ + /* use the appropriate device log. accept logcat for backwards compatibility */ + sqlcipher_log_device = 1; } else if(sqlite3_stricmp(destination, "stdout") == 0){ sqlcipher_log_file = stdout; }else if(sqlite3_stricmp(destination, "stderr") == 0){ @@ -105785,12 +110617,12 @@ int sqlcipher_nss_setup(sqlcipher_provider *p) { /* #include "sqliteInt.h" */ /* #include "crypto.h" */ /* #include "sqlcipher.h" */ -#include -#include -#include -#include -#include -#include +#include /* amalgamator: dontcache */ +#include /* amalgamator: dontcache */ +#include /* amalgamator: dontcache */ +#include /* amalgamator: dontcache */ +#include /* amalgamator: dontcache */ +#include /* amalgamator: dontcache */ static unsigned int openssl_init_count = 0; @@ -105898,7 +110730,11 @@ static const char* sqlcipher_openssl_get_provider_name(void *ctx) { } static const char* sqlcipher_openssl_get_provider_version(void *ctx) { +#if (defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < 0x10100000L) return OPENSSL_VERSION_TEXT; +#else + return OpenSSL_version(OPENSSL_VERSION); +#endif } /* generate a defined number of random bytes */ @@ -106514,7 +111350,7 @@ static int walkWindowList(Walker *pWalker, Window *pList, int bOneOnly){ ** The return value from this routine is WRC_Abort to abandon the tree walk ** and WRC_Continue to continue. 
*/ -static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ +SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3WalkExprNN(Walker *pWalker, Expr *pExpr){ int rc; testcase( ExprHasProperty(pExpr, EP_TokenOnly) ); testcase( ExprHasProperty(pExpr, EP_Reduced) ); @@ -106523,7 +111359,9 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ if( rc ) return rc & WRC_Abort; if( !ExprHasProperty(pExpr,(EP_TokenOnly|EP_Leaf)) ){ assert( pExpr->x.pList==0 || pExpr->pRight==0 ); - if( pExpr->pLeft && walkExpr(pWalker, pExpr->pLeft) ) return WRC_Abort; + if( pExpr->pLeft && sqlite3WalkExprNN(pWalker, pExpr->pLeft) ){ + return WRC_Abort; + } if( pExpr->pRight ){ assert( !ExprHasProperty(pExpr, EP_WinFunc) ); pExpr = pExpr->pRight; @@ -106547,7 +111385,7 @@ static SQLITE_NOINLINE int walkExpr(Walker *pWalker, Expr *pExpr){ return WRC_Continue; } SQLITE_PRIVATE int sqlite3WalkExpr(Walker *pWalker, Expr *pExpr){ - return pExpr ? walkExpr(pWalker,pExpr) : WRC_Continue; + return pExpr ? sqlite3WalkExprNN(pWalker,pExpr) : WRC_Continue; } /* @@ -106673,7 +111511,7 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){ } /* Increase the walkerDepth when entering a subquery, and -** descrease when leaving the subquery. +** decrease when leaving the subquery. */ SQLITE_PRIVATE int sqlite3WalkerDepthIncrease(Walker *pWalker, Select *pSelect){ UNUSED_PARAMETER(pSelect); @@ -106792,6 +111630,8 @@ static void resolveAlias( assert( iCol>=0 && iColnExpr ); pOrig = pEList->a[iCol].pExpr; assert( pOrig!=0 ); + assert( !ExprHasProperty(pExpr, EP_Reduced|EP_TokenOnly) ); + if( pExpr->pAggInfo ) return; db = pParse->db; pDup = sqlite3ExprDup(db, pOrig, 0); if( db->mallocFailed ){ @@ -106812,28 +111652,41 @@ static void resolveAlias( pExpr->y.pWin->pOwner = pExpr; } } - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprDelete, - pDup); + sqlite3ExprDeferredDelete(pParse, pDup); } } /* -** Subqueries stores the original database, table and column names for their -** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN". -** Check to see if the zSpan given to this routine matches the zDb, zTab, -** and zCol. If any of zDb, zTab, and zCol are NULL then those fields will -** match anything. +** Subqueries store the original database, table and column names for their +** result sets in ExprList.a[].zSpan, in the form "DATABASE.TABLE.COLUMN", +** and mark the expression-list item by setting ExprList.a[].fg.eEName +** to ENAME_TAB. +** +** Check to see if the zSpan/eEName of the expression-list item passed to this +** routine matches the zDb, zTab, and zCol. If any of zDb, zTab, and zCol are +** NULL then those fields will match anything. Return true if there is a match, +** or false otherwise. +** +** SF_NestedFrom subqueries also store an entry for the implicit rowid (or +** _rowid_, or oid) column by setting ExprList.a[].fg.eEName to ENAME_ROWID, +** and setting zSpan to "DATABASE.TABLE.". This type of pItem +** argument matches if zCol is a rowid alias. If it is not NULL, (*pbRowid) +** is set to 1 if there is this kind of match. 
*/ SQLITE_PRIVATE int sqlite3MatchEName( const struct ExprList_item *pItem, const char *zCol, const char *zTab, - const char *zDb + const char *zDb, + int *pbRowid ){ int n; const char *zSpan; - if( pItem->fg.eEName!=ENAME_TAB ) return 0; + int eEName = pItem->fg.eEName; + if( eEName!=ENAME_TAB && (eEName!=ENAME_ROWID || NEVER(pbRowid==0)) ){ + return 0; + } + assert( pbRowid==0 || *pbRowid==0 ); zSpan = pItem->zEName; for(n=0; ALWAYS(zSpan[n]) && zSpan[n]!='.'; n++){} if( zDb && (sqlite3StrNICmp(zSpan, zDb, n)!=0 || zDb[n]!=0) ){ @@ -106845,9 +111698,11 @@ SQLITE_PRIVATE int sqlite3MatchEName( return 0; } zSpan += n+1; - if( zCol && sqlite3StrICmp(zSpan, zCol)!=0 ){ - return 0; + if( zCol ){ + if( eEName==ENAME_TAB && sqlite3StrICmp(zSpan, zCol)!=0 ) return 0; + if( eEName==ENAME_ROWID && sqlite3IsRowid(zCol)==0 ) return 0; } + if( eEName==ENAME_ROWID ) *pbRowid = 1; return 1; } @@ -106880,6 +111735,7 @@ SQLITE_PRIVATE Bitmask sqlite3ExprColUsed(Expr *pExpr){ assert( ExprUseYTab(pExpr) ); pExTab = pExpr->y.pTab; assert( pExTab!=0 ); + assert( n < pExTab->nCol ); if( (pExTab->tabFlags & TF_HasGenerated)!=0 && (pExTab->aCol[n].colFlags & COLFLAG_GENERATED)!=0 ){ @@ -106917,6 +111773,32 @@ static void extendFJMatch( } } +/* +** Return TRUE (non-zero) if zTab is a valid name for the schema table pTab. +*/ +static SQLITE_NOINLINE int isValidSchemaTableName( + const char *zTab, /* Name as it appears in the SQL */ + Table *pTab, /* The schema table we are trying to match */ + Schema *pSchema /* non-NULL if a database qualifier is present */ +){ + const char *zLegacy; + assert( pTab!=0 ); + assert( pTab->tnum==1 ); + if( sqlite3StrNICmp(zTab, "sqlite_", 7)!=0 ) return 0; + zLegacy = pTab->zName; + if( strcmp(zLegacy+7, &LEGACY_TEMP_SCHEMA_TABLE[7])==0 ){ + if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){ + return 1; + } + if( pSchema==0 ) return 0; + if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1; + if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; + }else{ + if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1; + } + return 0; +} + /* ** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up ** that name in the set of source tables in pSrcList and make the pExpr @@ -106954,7 +111836,7 @@ static int lookupName( ){ int i, j; /* Loop counters */ int cnt = 0; /* Number of matching column names */ - int cntTab = 0; /* Number of matching table names */ + int cntTab = 0; /* Number of potential "rowid" matches */ int nSubquery = 0; /* How many levels of subquery */ sqlite3 *db = pParse->db; /* The database connection */ SrcItem *pItem; /* Use for looping over pSrcList items */ @@ -107031,54 +111913,66 @@ static int lookupName( assert( pEList!=0 ); assert( pEList->nExpr==pTab->nCol ); for(j=0; jnExpr; j++){ - if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb) ){ + int bRowid = 0; /* True if possible rowid match */ + if( !sqlite3MatchEName(&pEList->a[j], zCol, zTab, zDb, &bRowid) ){ continue; } - if( cnt>0 ){ - if( pItem->fg.isUsing==0 - || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 - ){ - /* Two or more tables have the same column name which is - ** not joined by USING. This is an error. Signal as much - ** by clearing pFJMatch and letting cnt go above 1. */ - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else - if( (pItem->fg.jointype & JT_RIGHT)==0 ){ - /* An INNER or LEFT JOIN. Use the left-most table */ - continue; - }else - if( (pItem->fg.jointype & JT_LEFT)==0 ){ - /* A RIGHT JOIN. 
Use the right-most table */ - cnt = 0; - sqlite3ExprListDelete(db, pFJMatch); - pFJMatch = 0; - }else{ - /* For a FULL JOIN, we must construct a coalesce() func */ - extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); + if( bRowid==0 ){ + if( cnt>0 ){ + if( pItem->fg.isUsing==0 + || sqlite3IdListIndex(pItem->u3.pUsing, zCol)<0 + ){ + /* Two or more tables have the same column name which is + ** not joined by USING. This is an error. Signal as much + ** by clearing pFJMatch and letting cnt go above 1. */ + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else + if( (pItem->fg.jointype & JT_RIGHT)==0 ){ + /* An INNER or LEFT JOIN. Use the left-most table */ + continue; + }else + if( (pItem->fg.jointype & JT_LEFT)==0 ){ + /* A RIGHT JOIN. Use the right-most table */ + cnt = 0; + sqlite3ExprListDelete(db, pFJMatch); + pFJMatch = 0; + }else{ + /* For a FULL JOIN, we must construct a coalesce() func */ + extendFJMatch(pParse, &pFJMatch, pMatch, pExpr->iColumn); + } } + cnt++; + hit = 1; + }else if( cnt>0 ){ + /* This is a potential rowid match, but there has already been + ** a real match found. So this can be ignored. */ + continue; } - cnt++; - cntTab = 2; + cntTab++; pMatch = pItem; pExpr->iColumn = j; pEList->a[j].fg.bUsed = 1; - hit = 1; + + /* rowid cannot be part of a USING clause - assert() this. */ + assert( bRowid==0 || pEList->a[j].fg.bUsingTerm==0 ); if( pEList->a[j].fg.bUsingTerm ) break; } if( hit || zTab==0 ) continue; } assert( zDb==0 || zTab!=0 ); if( zTab ){ - const char *zTabName; if( zDb ){ if( pTab->pSchema!=pSchema ) continue; if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue; } - zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName; - assert( zTabName!=0 ); - if( sqlite3StrICmp(zTabName, zTab)!=0 ){ - continue; + if( pItem->zAlias!=0 ){ + if( sqlite3StrICmp(zTab, pItem->zAlias)!=0 ){ + continue; + } + }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){ + if( pTab->tnum!=1 ) continue; + if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue; } assert( ExprUseYTab(pExpr) ); if( IN_RENAME_OBJECT && pItem->zAlias ){ @@ -107125,8 +112019,37 @@ static int lookupName( } } if( 0==cnt && VisibleRowid(pTab) ){ + /* pTab is a potential ROWID match. Keep track of it and match + ** the ROWID later if that seems appropriate. (Search for "cntTab" + ** to find related code.) Only allow a ROWID match if there is + ** a single ROWID match candidate. + */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + /* In SQLITE_ALLOW_ROWID_IN_VIEW mode, allow a ROWID match + ** if there is a single VIEW candidate or if there is a single + ** non-VIEW candidate plus multiple VIEW candidates. In other + ** words non-VIEW candidate terms take precedence over VIEWs. 
+ */ + if( cntTab==0 + || (cntTab==1 + && ALWAYS(pMatch!=0) + && ALWAYS(pMatch->pTab!=0) + && (pMatch->pTab->tabFlags & TF_Ephemeral)!=0 + && (pTab->tabFlags & TF_Ephemeral)==0) + ){ + cntTab = 1; + pMatch = pItem; + }else{ + cntTab++; + } +#else + /* The (much more common) non-SQLITE_ALLOW_ROWID_IN_VIEW case is + ** simpler since we require exactly one candidate, which will + ** always be a non-VIEW + */ cntTab++; pMatch = pItem; +#endif } } if( pMatch ){ @@ -107154,7 +112077,8 @@ static int lookupName( assert( op==TK_DELETE || op==TK_UPDATE || op==TK_INSERT ); if( pParse->bReturning ){ if( (pNC->ncFlags & NC_UBaseReg)!=0 - && (zTab==0 || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) + && ALWAYS(zTab==0 + || sqlite3StrICmp(zTab,pParse->pTriggerTab->zName)==0) ){ pExpr->iTable = op!=TK_DELETE; pTab = pParse->pTriggerTab; @@ -107221,6 +112145,7 @@ static int lookupName( if( pParse->bReturning ){ eNewExprOp = TK_REGISTER; pExpr->op2 = TK_COLUMN; + pExpr->iColumn = iCol; pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable + sqlite3TableColumnToStorage(pTab, iCol) + 1; }else{ @@ -107250,14 +112175,14 @@ static int lookupName( ** Perhaps the name is a reference to the ROWID */ if( cnt==0 - && cntTab==1 + && cntTab>=1 && pMatch && (pNC->ncFlags & (NC_IdxExpr|NC_GenCol))==0 && sqlite3IsRowid(zCol) - && ALWAYS(VisibleRowid(pMatch->pTab)) + && ALWAYS(VisibleRowid(pMatch->pTab) || pMatch->fg.isNestedFrom) ){ - cnt = 1; - pExpr->iColumn = -1; + cnt = cntTab; + if( pMatch->fg.isNestedFrom==0 ) pExpr->iColumn = -1; pExpr->affExpr = SQLITE_AFF_INTEGER; } @@ -107416,6 +112341,7 @@ static int lookupName( sqlite3RecordErrorOffsetOfExpr(pParse->db, pExpr); pParse->checkSchema = 1; pTopNC->nNcErr++; + eNewExprOp = TK_NULL; } assert( pFJMatch==0 ); @@ -107442,7 +112368,7 @@ static int lookupName( ** If a generated column is referenced, set bits for every column ** of the table. */ - if( pExpr->iColumn>=0 && pMatch!=0 ){ + if( pExpr->iColumn>=0 && cnt==1 && pMatch!=0 ){ pMatch->colUsed |= sqlite3ExprColUsed(pExpr); } @@ -107620,6 +112546,19 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ ** resolved. This prevents "column" from being counted as having been ** referenced, which might prevent a SELECT from being erroneously ** marked as correlated. + ** + ** 2024-03-28: Beware of aggregates. A bare column of aggregated table + ** can still evaluate to NULL even though it is marked as NOT NULL. + ** Example: + ** + ** CREATE TABLE t1(a INT NOT NULL); + ** SELECT a, a IS NULL, a IS NOT NULL, count(*) FROM t1; + ** + ** The "a IS NULL" and "a IS NOT NULL" expressions cannot be optimized + ** here because at the time this case is hit, we do not yet know whether + ** or not t1 is being aggregated. We have to assume the worst and omit + ** the optimization. The only time it is safe to apply this optimization + ** is within the WHERE clause. 
*/ case TK_NOTNULL: case TK_ISNULL: { @@ -107630,23 +112569,36 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ anRef[i] = p->nRef; } sqlite3WalkExpr(pWalker, pExpr->pLeft); - if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){ - testcase( ExprHasProperty(pExpr, EP_OuterON) ); - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - if( pExpr->op==TK_NOTNULL ){ - pExpr->u.zToken = "true"; - ExprSetProperty(pExpr, EP_IsTrue); - }else{ - pExpr->u.zToken = "false"; - ExprSetProperty(pExpr, EP_IsFalse); - } - pExpr->op = TK_TRUEFALSE; - for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){ - p->nRef = anRef[i]; + if( IN_RENAME_OBJECT ) return WRC_Prune; + if( sqlite3ExprCanBeNull(pExpr->pLeft) ){ + /* The expression can be NULL. So the optimization does not apply */ + return WRC_Prune; + } + + for(i=0, p=pNC; p; p=p->pNext, i++){ + if( (p->ncFlags & NC_Where)==0 ){ + return WRC_Prune; /* Not in a WHERE clause. Unsafe to optimize. */ } - sqlite3ExprDelete(pParse->db, pExpr->pLeft); - pExpr->pLeft = 0; } + testcase( ExprHasProperty(pExpr, EP_OuterON) ); + assert( !ExprHasProperty(pExpr, EP_IntValue) ); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x80000 ){ + sqlite3DebugPrintf( + "NOT NULL strength reduction converts the following to %d:\n", + pExpr->op==TK_NOTNULL + ); + sqlite3ShowExpr(pExpr); + } +#endif /* TREETRACE_ENABLED */ + pExpr->u.iValue = (pExpr->op==TK_NOTNULL); + pExpr->flags |= EP_IntValue; + pExpr->op = TK_INTEGER; + for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){ + p->nRef = anRef[i]; + } + sqlite3ExprDelete(pParse->db, pExpr->pLeft); + pExpr->pLeft = 0; return WRC_Prune; } @@ -107714,6 +112666,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ Window *pWin = (IsWindowFunc(pExpr) ? pExpr->y.pWin : 0); #endif assert( !ExprHasProperty(pExpr, EP_xIsSelect|EP_IntValue) ); + assert( pExpr->pLeft==0 || pExpr->pLeft->op==TK_ORDER ); zId = pExpr->u.zToken; pDef = sqlite3FindFunction(pParse->db, zId, n, enc, 0); if( pDef==0 ){ @@ -107855,6 +112808,10 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ pNC->nNcErr++; } #endif + else if( is_agg==0 && pExpr->pLeft ){ + sqlite3ExprOrderByAggregateError(pParse, pExpr); + pNC->nNcErr++; + } if( is_agg ){ /* Window functions may not be arguments of aggregate functions. ** Or arguments of other window functions. 
But aggregate functions @@ -107873,6 +112830,11 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ #endif sqlite3WalkExprList(pWalker, pList); if( is_agg ){ + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + sqlite3WalkExprList(pWalker, pExpr->pLeft->x.pList); + } #ifndef SQLITE_OMIT_WINDOWFUNC if( pWin ){ Select *pSel = pNC->pWinSelect; @@ -107901,11 +112863,12 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ while( pNC2 && sqlite3ReferencesSrcList(pParse, pExpr, pNC2->pSrcList)==0 ){ - pExpr->op2++; + pExpr->op2 += (1 + pNC2->nNestedSelect); pNC2 = pNC2->pNext; } assert( pDef!=0 || IN_RENAME_OBJECT ); if( pNC2 && pDef ){ + pExpr->op2 += pNC2->nNestedSelect; assert( SQLITE_FUNC_MINMAX==NC_MinMaxAgg ); assert( SQLITE_FUNC_ANYORDER==NC_OrderAgg ); testcase( (pDef->funcFlags & SQLITE_FUNC_MINMAX)!=0 ); @@ -107942,8 +112905,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){ assert( pNC->nRef>=nRef ); if( nRef!=pNC->nRef ){ ExprSetProperty(pExpr, EP_VarSelect); - pNC->ncFlags |= NC_VarSelect; } + pNC->ncFlags |= NC_Subquery; break; } @@ -108383,7 +113346,7 @@ static int resolveOrderGroupBy( } for(j=0; j<pSelect->pEList->nExpr; j++){ if( sqlite3ExprCompare(0, pE, pSelect->pEList->a[j].pExpr, -1)==0 ){ - /* Since this expresion is being changed into a reference + /* Since this expression is being changed into a reference ** to an identical expression in the result set, remove all Window ** objects belonging to the expression from the Select.pWin list. */ windowRemoveExprFromSelect(pSelect, pE); @@ -108436,10 +113399,8 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ while( p ){ assert( (p->selFlags & SF_Expanded)!=0 ); assert( (p->selFlags & SF_Resolved)==0 ); - assert( db->suppressErr==0 ); /* SF_Resolved not set if errors suppressed */ p->selFlags |= SF_Resolved; - /* Resolve the expressions in the LIMIT and OFFSET clauses. These ** are not allowed to refer to any names, so pass an empty NameContext. */ @@ -108466,6 +113427,7 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ /* Recursively resolve names in all subqueries in the FROM clause */ + if( pOuterNC ) pOuterNC->nNestedSelect++; for(i=0; i<p->pSrc->nSrc; i++){ SrcItem *pItem = &p->pSrc->a[i]; if( pItem->pSelect && (pItem->pSelect->selFlags & SF_Resolved)==0 ){ @@ -108490,6 +113452,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } } } + if( pOuterNC && ALWAYS(pOuterNC->nNestedSelect>0) ){ + pOuterNC->nNestedSelect--; + } /* Set up the local name-context to pass to sqlite3ResolveExprNames() to ** resolve the result-set expression list. 
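The NC_Where gating introduced in the resolver hunks above is easy to see in action from the public API. The following standalone program is a minimal sketch (it is not part of the patch and assumes nothing beyond a stock SQLite build linked with -lsqlite3): an aggregate query over an empty table still produces one output row, and in that row the bare column "a" is NULL despite its NOT NULL declaration, which is why "a IS NULL" may only be strength-reduced to a constant inside a WHERE clause.

#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  /* A NOT NULL column, but the table is deliberately left empty. */
  sqlite3_exec(db, "CREATE TABLE t1(a INT NOT NULL);", 0, 0, 0);
  /* An aggregate over zero rows yields one row whose bare column "a"
  ** is NULL, so "a IS NULL" must not be folded to constant false here. */
  sqlite3_prepare_v2(db,
      "SELECT a IS NULL, a IS NOT NULL, count(*) FROM t1;", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("a IS NULL -> %d, a IS NOT NULL -> %d, count(*) -> %d\n",
           sqlite3_column_int(pStmt, 0),
           sqlite3_column_int(pStmt, 1),
           sqlite3_column_int(pStmt, 2));  /* prints 1, 0, 0 */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}

Inside a WHERE clause the rewrite is safe because WHERE is evaluated against individual stored rows before aggregation, and a stored row can never hold a NULL in a NOT NULL column (outer-join NULL rows are screened out separately via sqlite3ExprCanBeNull()); resolveExprStep therefore requires the NC_Where flag on every enclosing NameContext before folding TK_NOTNULL/TK_ISNULL to TK_INTEGER.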
@@ -108533,7 +113498,9 @@ static int resolveSelectStep(Walker *pWalker, Select *p){ } if( sqlite3ResolveExprNames(&sNC, p->pHaving) ) return WRC_Abort; } + sNC.ncFlags |= NC_Where; if( sqlite3ResolveExprNames(&sNC, p->pWhere) ) return WRC_Abort; + sNC.ncFlags &= ~NC_Where; /* Resolve names in table-valued-function arguments */ for(i=0; ipSrc->nSrc; i++){ @@ -108706,7 +113673,8 @@ SQLITE_PRIVATE int sqlite3ResolveExprNames( return SQLITE_ERROR; } #endif - sqlite3WalkExpr(&w, pExpr); + assert( pExpr!=0 ); + sqlite3WalkExprNN(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight -= pExpr->nHeight; #endif @@ -108748,7 +113716,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( return WRC_Abort; } #endif - sqlite3WalkExpr(&w, pExpr); + sqlite3WalkExprNN(&w, pExpr); #if SQLITE_MAX_EXPR_DEPTH>0 w.pParse->nHeight -= pExpr->nHeight; #endif @@ -108770,7 +113738,7 @@ SQLITE_PRIVATE int sqlite3ResolveExprListNames( /* ** Resolve all names in all expressions of a SELECT and in all -** decendents of the SELECT, including compounds off of p->pPrior, +** descendants of the SELECT, including compounds off of p->pPrior, ** subqueries in expressions, and subqueries used as FROM clause ** terms. ** @@ -108897,49 +113865,122 @@ SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table *pTab, int iCol){ */ SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){ int op; - while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){ - assert( pExpr->op==TK_COLLATE - || pExpr->op==TK_IF_NULL_ROW - || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) ); - pExpr = pExpr->pLeft; - assert( pExpr!=0 ); - } op = pExpr->op; - if( op==TK_REGISTER ) op = pExpr->op2; - if( op==TK_COLUMN || op==TK_AGG_COLUMN ){ - assert( ExprUseYTab(pExpr) ); - if( pExpr->y.pTab ){ + while( 1 /* exit-by-break */ ){ + if( op==TK_COLUMN || (op==TK_AGG_COLUMN && pExpr->y.pTab!=0) ){ + assert( ExprUseYTab(pExpr) ); + assert( pExpr->y.pTab!=0 ); return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); } - } - if( op==TK_SELECT ){ - assert( ExprUseXSelect(pExpr) ); - assert( pExpr->x.pSelect!=0 ); - assert( pExpr->x.pSelect->pEList!=0 ); - assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); - return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); - } + if( op==TK_SELECT ){ + assert( ExprUseXSelect(pExpr) ); + assert( pExpr->x.pSelect!=0 ); + assert( pExpr->x.pSelect->pEList!=0 ); + assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 ); + return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr); + } #ifndef SQLITE_OMIT_CAST - if( op==TK_CAST ){ - assert( !ExprHasProperty(pExpr, EP_IntValue) ); - return sqlite3AffinityType(pExpr->u.zToken, 0); - } + if( op==TK_CAST ){ + assert( !ExprHasProperty(pExpr, EP_IntValue) ); + return sqlite3AffinityType(pExpr->u.zToken, 0); + } #endif - if( op==TK_SELECT_COLUMN ){ - assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) ); - assert( pExpr->iColumn < pExpr->iTable ); - assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr ); - return sqlite3ExprAffinity( - pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr - ); - } - if( op==TK_VECTOR ){ - assert( ExprUseXList(pExpr) ); - return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); + if( op==TK_SELECT_COLUMN ){ + assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) ); + assert( pExpr->iColumn < pExpr->iTable ); + assert( pExpr->iColumn >= 0 ); + assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr ); + return sqlite3ExprAffinity( + pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr + ); + } + if( 
op==TK_VECTOR ){ + assert( ExprUseXList(pExpr) ); + return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr); + } + if( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){ + assert( pExpr->op==TK_COLLATE + || pExpr->op==TK_IF_NULL_ROW + || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) ); + pExpr = pExpr->pLeft; + op = pExpr->op; + continue; + } + if( op!=TK_REGISTER || (op = pExpr->op2)==TK_REGISTER ) break; } return pExpr->affExpr; } +/* +** Make a guess at all the possible datatypes of the result that could +** be returned by an expression. Return a bitmask indicating the answer: +** +** 0x01 Numeric +** 0x02 Text +** 0x04 Blob +** +** If the expression must return NULL, then 0x00 is returned. +*/ +SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr){ + while( pExpr ){ + switch( pExpr->op ){ + case TK_COLLATE: + case TK_IF_NULL_ROW: + case TK_UPLUS: { + pExpr = pExpr->pLeft; + break; + } + case TK_NULL: { + pExpr = 0; + break; + } + case TK_STRING: { + return 0x02; + } + case TK_BLOB: { + return 0x04; + } + case TK_CONCAT: { + return 0x06; + } + case TK_VARIABLE: + case TK_AGG_FUNCTION: + case TK_FUNCTION: { + return 0x07; + } + case TK_COLUMN: + case TK_AGG_COLUMN: + case TK_SELECT: + case TK_CAST: + case TK_SELECT_COLUMN: + case TK_VECTOR: { + int aff = sqlite3ExprAffinity(pExpr); + if( aff>=SQLITE_AFF_NUMERIC ) return 0x05; + if( aff==SQLITE_AFF_TEXT ) return 0x06; + return 0x07; + } + case TK_CASE: { + int res = 0; + int ii; + ExprList *pList = pExpr->x.pList; + assert( ExprUseXList(pExpr) && pList!=0 ); + assert( pList->nExpr > 0); + for(ii=1; ii<pList->nExpr; ii+=2){ + res |= sqlite3ExprDataType(pList->a[ii].pExpr); + } + if( pList->nExpr % 2 ){ + res |= sqlite3ExprDataType(pList->a[pList->nExpr-1].pExpr); + } + return res; + } + default: { + return 0x01; + } + } /* End of switch(op) */ + } /* End of while(pExpr) */ + return 0x00; +} + /* ** Set the collating sequence for expression pExpr to be the collating ** sequence named by pToken. 
Return a pointer to a new Expr node that @@ -108998,9 +114039,10 @@ SQLITE_PRIVATE Expr *sqlite3ExprSkipCollateAndLikely(Expr *pExpr){ assert( pExpr->x.pList->nExpr>0 ); assert( pExpr->op==TK_FUNCTION ); pExpr = pExpr->x.pList->a[0].pExpr; - }else{ - assert( pExpr->op==TK_COLLATE ); + }else if( pExpr->op==TK_COLLATE ){ pExpr = pExpr->pLeft; + }else{ + break; } } return pExpr; @@ -109027,18 +114069,17 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ while( p ){ int op = p->op; if( op==TK_REGISTER ) op = p->op2; - if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){ + if( (op==TK_AGG_COLUMN && p->y.pTab!=0) + || op==TK_COLUMN || op==TK_TRIGGER + ){ + int j; assert( ExprUseYTab(p) ); - if( p->y.pTab!=0 ){ - /* op==TK_REGISTER && p->y.pTab!=0 happens when pExpr was originally - ** a TK_COLUMN but was previously evaluated and cached in a register */ - int j = p->iColumn; - if( j>=0 ){ - const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]); - pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); - } - break; + assert( p->y.pTab!=0 ); + if( (j = p->iColumn)>=0 ){ + const char *zColl = sqlite3ColumnColl(&p->y.pTab->aCol[j]); + pColl = sqlite3FindCollSeq(db, ENC(db), zColl, 0); } + break; } if( op==TK_CAST || op==TK_UPLUS ){ p = p->pLeft; @@ -109060,11 +114101,10 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ }else{ Expr *pNext = p->pRight; /* The Expr.x union is never used at the same time as Expr.pRight */ - assert( ExprUseXList(p) ); - assert( p->x.pList==0 || p->pRight==0 ); - if( p->x.pList!=0 && !db->mallocFailed ){ + assert( !ExprUseXList(p) || p->x.pList==0 || p->pRight==0 ); + if( ExprUseXList(p) && p->x.pList!=0 && !db->mallocFailed ){ int i; - for(i=0; ALWAYS(ix.pList->nExpr); i++){ + for(i=0; ix.pList->nExpr; i++){ if( ExprHasProperty(p->x.pList->a[i].pExpr, EP_Collate) ){ pNext = p->x.pList->a[i].pExpr; break; @@ -109086,7 +114126,7 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){ /* ** Return the collation sequence for the expression pExpr. If ** there is no defined collating sequence, return a pointer to the -** defautl collation sequence. +** default collation sequence. ** ** See also: sqlite3ExprCollSeq() ** @@ -109216,7 +114256,7 @@ SQLITE_PRIVATE CollSeq *sqlite3BinaryCompareCollSeq( return pColl; } -/* Expresssion p is a comparison operator. Return a collation sequence +/* Expression p is a comparison operator. Return a collation sequence ** appropriate for the comparison operator. ** ** This is normally just a wrapper around sqlite3BinaryCompareCollSeq(). @@ -109373,6 +114413,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField( */ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0); if( pRet ){ + ExprSetProperty(pRet, EP_FullSize); pRet->iTable = nField; pRet->iColumn = iField; pRet->pLeft = pVector; @@ -109623,7 +114664,9 @@ static void heightOfSelect(const Select *pSelect, int *pnHeight){ */ static void exprSetHeight(Expr *p){ int nHeight = p->pLeft ? p->pLeft->nHeight : 0; - if( p->pRight && p->pRight->nHeight>nHeight ) nHeight = p->pRight->nHeight; + if( NEVER(p->pRight) && p->pRight->nHeight>nHeight ){ + nHeight = p->pRight->nHeight; + } if( ExprUseXSelect(p) ){ heightOfSelect(p->x.pSelect, &nHeight); }else if( p->x.pList ){ @@ -109670,6 +114713,15 @@ SQLITE_PRIVATE void sqlite3ExprSetHeightAndFlags(Parse *pParse, Expr *p){ #define exprSetHeight(y) #endif /* SQLITE_MAX_EXPR_DEPTH>0 */ +/* +** Set the error offset for an Expr node, if possible. 
+*/ +SQLITE_PRIVATE void sqlite3ExprSetErrorOffset(Expr *pExpr, int iOfst){ + if( pExpr==0 ) return; + if( NEVER(ExprUseWJoin(pExpr)) ) return; + pExpr->w.iOfst = iOfst; +} + /* ** This routine is the core allocator for Expr nodes. ** @@ -109766,15 +114818,26 @@ SQLITE_PRIVATE void sqlite3ExprAttachSubtrees( sqlite3ExprDelete(db, pLeft); sqlite3ExprDelete(db, pRight); }else{ + assert( ExprUseXList(pRoot) ); + assert( pRoot->x.pSelect==0 ); if( pRight ){ pRoot->pRight = pRight; pRoot->flags |= EP_Propagate & pRight->flags; +#if SQLITE_MAX_EXPR_DEPTH>0 + pRoot->nHeight = pRight->nHeight+1; + }else{ + pRoot->nHeight = 1; +#endif } if( pLeft ){ pRoot->pLeft = pLeft; pRoot->flags |= EP_Propagate & pLeft->flags; +#if SQLITE_MAX_EXPR_DEPTH>0 + if( pLeft->nHeight>=pRoot->nHeight ){ + pRoot->nHeight = pLeft->nHeight+1; + } +#endif } - exprSetHeight(pRoot); } } @@ -109883,9 +114946,9 @@ SQLITE_PRIVATE Select *sqlite3ExprListToValues(Parse *pParse, int nElem, ExprLis ** Join two expressions using an AND operator. If either expression is ** NULL, then just return the other expression. ** -** If one side or the other of the AND is known to be false, then instead -** of returning an AND expression, just return a constant expression with -** a value of false. +** If one side or the other of the AND is known to be false, and neither side +** is part of an ON clause, then instead of returning an AND expression, +** just return a constant expression with a value of false. */ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ sqlite3 *db = pParse->db; @@ -109893,14 +114956,17 @@ SQLITE_PRIVATE Expr *sqlite3ExprAnd(Parse *pParse, Expr *pLeft, Expr *pRight){ return pRight; }else if( pRight==0 ){ return pLeft; - }else if( (ExprAlwaysFalse(pLeft) || ExprAlwaysFalse(pRight)) - && !IN_RENAME_OBJECT - ){ - sqlite3ExprDeferredDelete(pParse, pLeft); - sqlite3ExprDeferredDelete(pParse, pRight); - return sqlite3Expr(db, TK_INTEGER, "0"); }else{ - return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + u32 f = pLeft->flags | pRight->flags; + if( (f&(EP_OuterON|EP_InnerON|EP_IsFalse))==EP_IsFalse + && !IN_RENAME_OBJECT + ){ + sqlite3ExprDeferredDelete(pParse, pLeft); + sqlite3ExprDeferredDelete(pParse, pRight); + return sqlite3Expr(db, TK_INTEGER, "0"); + }else{ + return sqlite3PExpr(pParse, TK_AND, pLeft, pRight); + } } } @@ -109938,6 +115004,67 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction( return pNew; } +/* +** Report an error when attempting to use an ORDER BY clause within +** the arguments of a non-aggregate function. +*/ +SQLITE_PRIVATE void sqlite3ExprOrderByAggregateError(Parse *pParse, Expr *p){ + sqlite3ErrorMsg(pParse, + "ORDER BY may not be used with non-aggregate %#T()", p + ); +} + +/* +** Attach an ORDER BY clause to a function call. +** +** functionname( arguments ORDER BY sortlist ) +** \_____________________/ \______/ +** pExpr pOrderBy +** +** The ORDER BY clause is inserted into a new Expr node of type TK_ORDER +** and added to the Expr.pLeft field of the parent TK_FUNCTION node. 
+*/ +SQLITE_PRIVATE void sqlite3ExprAddFunctionOrderBy( + Parse *pParse, /* Parsing context */ + Expr *pExpr, /* The function call to which ORDER BY is to be added */ + ExprList *pOrderBy /* The ORDER BY clause to add */ +){ + Expr *pOB; + sqlite3 *db = pParse->db; + if( NEVER(pOrderBy==0) ){ + assert( db->mallocFailed ); + return; + } + if( pExpr==0 ){ + assert( db->mallocFailed ); + sqlite3ExprListDelete(db, pOrderBy); + return; + } + assert( pExpr->op==TK_FUNCTION ); + assert( pExpr->pLeft==0 ); + assert( ExprUseXList(pExpr) ); + if( pExpr->x.pList==0 || NEVER(pExpr->x.pList->nExpr==0) ){ + /* Ignore ORDER BY on zero-argument aggregates */ + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pOrderBy); + return; + } + if( IsWindowFunc(pExpr) ){ + sqlite3ExprOrderByAggregateError(pParse, pExpr); + sqlite3ExprListDelete(db, pOrderBy); + return; + } + + pOB = sqlite3ExprAlloc(db, TK_ORDER, 0, 0); + if( pOB==0 ){ + sqlite3ExprListDelete(db, pOrderBy); + return; + } + pOB->x.pList = pOrderBy; + assert( ExprUseXList(pOB) ); + pExpr->pLeft = pOB; + ExprSetProperty(pOB, EP_FullSize); +} + /* ** Check to see if a function is usable according to current access ** rules: @@ -110060,6 +115187,7 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n */ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ assert( p!=0 ); + assert( db!=0 ); assert( !ExprUseUValue(p) || p->u.iValue>=0 ); assert( !ExprUseYWin(p) || !ExprUseYSub(p) ); assert( !ExprUseYWin(p) || p->y.pWin!=0 || db->mallocFailed ); @@ -110091,17 +115219,16 @@ static SQLITE_NOINLINE void sqlite3ExprDeleteNN(sqlite3 *db, Expr *p){ #endif } } - if( ExprHasProperty(p, EP_MemToken) ){ - assert( !ExprHasProperty(p, EP_IntValue) ); - sqlite3DbFree(db, p->u.zToken); - } if( !ExprHasProperty(p, EP_Static) ){ - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } } SQLITE_PRIVATE void sqlite3ExprDelete(sqlite3 *db, Expr *p){ if( p ) sqlite3ExprDeleteNN(db, p); } +SQLITE_PRIVATE void sqlite3ExprDeleteGeneric(sqlite3 *db, void *p){ + if( ALWAYS(p) ) sqlite3ExprDeleteNN(db, (Expr*)p); +} /* ** Clear both elements of an OnOrUsing object @@ -110119,7 +115246,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ /* ** Arrange to cause pExpr to be deleted when the pParse is deleted. ** This is similar to sqlite3ExprDelete() except that the delete is -** deferred untilthe pParse is deleted. +** deferred until the pParse is deleted. ** ** The pExpr might be deleted immediately on an OOM error. ** @@ -110127,8 +115254,7 @@ SQLITE_PRIVATE void sqlite3ClearOnOrUsing(sqlite3 *db, OnOrUsing *p){ ** pExpr to the pParse->pConstExpr list with a register number of 0. 
*/ SQLITE_PRIVATE void sqlite3ExprDeferredDelete(Parse *pParse, Expr *pExpr){ - pParse->pConstExpr = - sqlite3ExprListAppend(pParse, pParse->pConstExpr, pExpr); + sqlite3ParserAddCleanup(pParse, sqlite3ExprDeleteGeneric, pExpr); } /* Invoke sqlite3RenameExprUnmap() and sqlite3ExprDelete() on the @@ -110193,16 +115319,11 @@ static int dupedExprStructSize(const Expr *p, int flags){ assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */ assert( EXPR_FULLSIZE<=0xfff ); assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 ); - if( 0==flags || p->op==TK_SELECT_COLUMN -#ifndef SQLITE_OMIT_WINDOWFUNC - || ExprHasProperty(p, EP_WinFunc) -#endif - ){ + if( 0==flags || ExprHasProperty(p, EP_FullSize) ){ nSize = EXPR_FULLSIZE; }else{ assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) ); assert( !ExprHasProperty(p, EP_OuterON) ); - assert( !ExprHasProperty(p, EP_MemToken) ); assert( !ExprHasVVAProperty(p, EP_NoReduce) ); if( p->pLeft || p->x.pList ){ nSize = EXPR_REDUCEDSIZE | EP_Reduced; @@ -110229,56 +115350,93 @@ static int dupedExprNodeSize(const Expr *p, int flags){ /* ** Return the number of bytes required to create a duplicate of the -** expression passed as the first argument. The second argument is a -** mask containing EXPRDUP_XXX flags. ** ** The value returned includes space to create a copy of the Expr struct ** itself and the buffer referred to by Expr.u.zToken, if any. ** -** If the EXPRDUP_REDUCE flag is set, then the return value includes -** space to duplicate all Expr nodes in the tree formed by Expr.pLeft -** and Expr.pRight variables (but not for any structures pointed to or -** descended from the Expr.x.pList or Expr.x.pSelect variables). +** The return value includes space to duplicate all Expr nodes in the +** tree formed by Expr.pLeft and Expr.pRight, but not any other +** substructure such as Expr.x.pList, Expr.x.pSelect, and Expr.y.pWin. */ -static int dupedExprSize(const Expr *p, int flags){ - int nByte = 0; - if( p ){ - nByte = dupedExprNodeSize(p, flags); - if( flags&EXPRDUP_REDUCE ){ - nByte += dupedExprSize(p->pLeft, flags) + dupedExprSize(p->pRight, flags); - } - } +static int dupedExprSize(const Expr *p){ + int nByte; + assert( p!=0 ); + nByte = dupedExprNodeSize(p, EXPRDUP_REDUCE); + if( p->pLeft ) nByte += dupedExprSize(p->pLeft); + if( p->pRight ) nByte += dupedExprSize(p->pRight); + assert( nByte==ROUND8(nByte) ); return nByte; } /* -** This function is similar to sqlite3ExprDup(), except that if pzBuffer -** is not NULL then *pzBuffer is assumed to point to a buffer large enough -** to store the copy of expression p, the copies of p->u.zToken -** (if applicable), and the copies of the p->pLeft and p->pRight expressions, -** if any. Before returning, *pzBuffer is set to the first byte past the -** portion of the buffer copied into by this function. +** An EdupBuf is a memory allocation used to store multiple Expr objects +** together with their Expr.zToken content. This is used to help implement +** compression while doing sqlite3ExprDup(). The top-level Expr does the +** allocation for itself and many of its descendants, then passes an instance +** of the structure down into exprDup() so that the descendants can have +** access to that memory. 
*/ -static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ +typedef struct EdupBuf EdupBuf; +struct EdupBuf { + u8 *zAlloc; /* Memory space available for storage */ +#ifdef SQLITE_DEBUG + u8 *zEnd; /* First byte past the end of memory */ +#endif +}; + +/* +** This function is similar to sqlite3ExprDup(), except that if pEdupBuf +** is not NULL then it points to memory that can be used to store a copy +** of the input Expr p together with its p->u.zToken (if any). pEdupBuf +** is updated with the new buffer tail prior to returning. +*/ +static Expr *exprDup( + sqlite3 *db, /* Database connection (for memory allocation) */ + const Expr *p, /* Expr tree to be duplicated */ + int dupFlags, /* EXPRDUP_REDUCE for compression. 0 if not */ + EdupBuf *pEdupBuf /* Preallocated storage space, or NULL */ +){ Expr *pNew; /* Value to return */ - u8 *zAlloc; /* Memory space from which to build Expr object */ + EdupBuf sEdupBuf; /* Memory space from which to build Expr object */ u32 staticFlag; /* EP_Static if space not obtained from malloc */ + int nToken = -1; /* Space needed for p->u.zToken. -1 means unknown */ assert( db!=0 ); assert( p ); assert( dupFlags==0 || dupFlags==EXPRDUP_REDUCE ); - assert( pzBuffer==0 || dupFlags==EXPRDUP_REDUCE ); + assert( pEdupBuf==0 || dupFlags==EXPRDUP_REDUCE ); /* Figure out where to write the new Expr structure. */ - if( pzBuffer ){ - zAlloc = *pzBuffer; + if( pEdupBuf ){ + sEdupBuf.zAlloc = pEdupBuf->zAlloc; +#ifdef SQLITE_DEBUG + sEdupBuf.zEnd = pEdupBuf->zEnd; +#endif staticFlag = EP_Static; - assert( zAlloc!=0 ); + assert( sEdupBuf.zAlloc!=0 ); + assert( dupFlags==EXPRDUP_REDUCE ); }else{ - zAlloc = sqlite3DbMallocRawNN(db, dupedExprSize(p, dupFlags)); + int nAlloc; + if( dupFlags ){ + nAlloc = dupedExprSize(p); + }else if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30NN(p->u.zToken)+1; + nAlloc = ROUND8(EXPR_FULLSIZE + nToken); + }else{ + nToken = 0; + nAlloc = ROUND8(EXPR_FULLSIZE); + } + assert( nAlloc==ROUND8(nAlloc) ); + sEdupBuf.zAlloc = sqlite3DbMallocRawNN(db, nAlloc); +#ifdef SQLITE_DEBUG + sEdupBuf.zEnd = sEdupBuf.zAlloc ? sEdupBuf.zAlloc+nAlloc : 0; +#endif + staticFlag = 0; } - pNew = (Expr *)zAlloc; + pNew = (Expr *)sEdupBuf.zAlloc; + assert( EIGHT_BYTE_ALIGNMENT(pNew) ); if( pNew ){ /* Set nNewSize to the size allocated for the structure pointed to @@ -110287,26 +115445,31 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ ** by the copy of the p->u.zToken string (if any). 
*/ const unsigned nStructSize = dupedExprStructSize(p, dupFlags); - const int nNewSize = nStructSize & 0xfff; - int nToken; - if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ - nToken = sqlite3Strlen30(p->u.zToken) + 1; - }else{ - nToken = 0; + int nNewSize = nStructSize & 0xfff; + if( nToken<0 ){ + if( !ExprHasProperty(p, EP_IntValue) && p->u.zToken ){ + nToken = sqlite3Strlen30(p->u.zToken) + 1; + }else{ + nToken = 0; + } } if( dupFlags ){ + assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= nNewSize+nToken ); assert( ExprHasProperty(p, EP_Reduced)==0 ); - memcpy(zAlloc, p, nNewSize); + memcpy(sEdupBuf.zAlloc, p, nNewSize); }else{ u32 nSize = (u32)exprStructSize(p); - memcpy(zAlloc, p, nSize); + assert( (int)(sEdupBuf.zEnd - sEdupBuf.zAlloc) >= + (int)EXPR_FULLSIZE+nToken ); + memcpy(sEdupBuf.zAlloc, p, nSize); if( nSizeflags &= ~(EP_Reduced|EP_TokenOnly|EP_Static|EP_MemToken); + pNew->flags &= ~(EP_Reduced|EP_TokenOnly|EP_Static); pNew->flags |= nStructSize & (EP_Reduced|EP_TokenOnly); pNew->flags |= staticFlag; ExprClearVVAProperties(pNew); @@ -110315,44 +115478,50 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ } /* Copy the p->u.zToken string, if any. */ - if( nToken ){ - char *zToken = pNew->u.zToken = (char*)&zAlloc[nNewSize]; + assert( nToken>=0 ); + if( nToken>0 ){ + char *zToken = pNew->u.zToken = (char*)&sEdupBuf.zAlloc[nNewSize]; memcpy(zToken, p->u.zToken, nToken); + nNewSize += nToken; } + sEdupBuf.zAlloc += ROUND8(nNewSize); + + if( ((p->flags|pNew->flags)&(EP_TokenOnly|EP_Leaf))==0 ){ - if( 0==((p->flags|pNew->flags) & (EP_TokenOnly|EP_Leaf)) ){ /* Fill in the pNew->x.pSelect or pNew->x.pList member. */ if( ExprUseXSelect(p) ){ pNew->x.pSelect = sqlite3SelectDup(db, p->x.pSelect, dupFlags); }else{ - pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, dupFlags); + pNew->x.pList = sqlite3ExprListDup(db, p->x.pList, + p->op!=TK_ORDER ? dupFlags : 0); } - } - /* Fill in pNew->pLeft and pNew->pRight. */ - if( ExprHasProperty(pNew, EP_Reduced|EP_TokenOnly|EP_WinFunc) ){ - zAlloc += dupedExprNodeSize(p, dupFlags); - if( !ExprHasProperty(pNew, EP_TokenOnly|EP_Leaf) ){ - pNew->pLeft = p->pLeft ? - exprDup(db, p->pLeft, EXPRDUP_REDUCE, &zAlloc) : 0; - pNew->pRight = p->pRight ? - exprDup(db, p->pRight, EXPRDUP_REDUCE, &zAlloc) : 0; - } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(p, EP_WinFunc) ){ pNew->y.pWin = sqlite3WindowDup(db, pNew, p->y.pWin); assert( ExprHasProperty(pNew, EP_WinFunc) ); } #endif /* SQLITE_OMIT_WINDOWFUNC */ - if( pzBuffer ){ - *pzBuffer = zAlloc; - } - }else{ - if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){ - if( pNew->op==TK_SELECT_COLUMN ){ + + /* Fill in pNew->pLeft and pNew->pRight. */ + if( dupFlags ){ + if( p->op==TK_SELECT_COLUMN ){ pNew->pLeft = p->pLeft; - assert( p->pRight==0 || p->pRight==p->pLeft - || ExprHasProperty(p->pLeft, EP_Subquery) ); + assert( p->pRight==0 + || p->pRight==p->pLeft + || ExprHasProperty(p->pLeft, EP_Subquery) ); + }else{ + pNew->pLeft = p->pLeft ? + exprDup(db, p->pLeft, EXPRDUP_REDUCE, &sEdupBuf) : 0; + } + pNew->pRight = p->pRight ? 
+ exprDup(db, p->pRight, EXPRDUP_REDUCE, &sEdupBuf) : 0; + }else{ + if( p->op==TK_SELECT_COLUMN ){ + pNew->pLeft = p->pLeft; + assert( p->pRight==0 + || p->pRight==p->pLeft + || ExprHasProperty(p->pLeft, EP_Subquery) ); + }else{ pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0); } @@ -110360,6 +115529,8 @@ static Expr *exprDup(sqlite3 *db, const Expr *p, int dupFlags, u8 **pzBuffer){ } } } + if( pEdupBuf ) memcpy(pEdupBuf, &sEdupBuf, sizeof(sEdupBuf)); + assert( sEdupBuf.zAlloc <= sEdupBuf.zEnd ); return pNew; } @@ -110624,11 +115795,7 @@ SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, const Select *p, int flags) ** initially NULL, then create a new expression list. ** ** The pList argument must be either NULL or a pointer to an ExprList -** obtained from a prior call to sqlite3ExprListAppend(). This routine -** may not be used with an ExprList obtained from sqlite3ExprListDup(). -** Reason: This routine assumes that the number of slots in pList->a[] -** is a power of two. That is true for sqlite3ExprListAppend() returns -** but is not necessarily true from the return value of sqlite3ExprListDup(). +** obtained from a prior call to sqlite3ExprListAppend(). ** ** If a memory allocation error occurs, the entire list is freed and ** NULL is returned. If non-NULL is returned, then it is guaranteed @@ -110882,16 +116049,20 @@ static SQLITE_NOINLINE void exprListDeleteNN(sqlite3 *db, ExprList *pList){ int i = pList->nExpr; struct ExprList_item *pItem = pList->a; assert( pList->nExpr>0 ); + assert( db!=0 ); do{ sqlite3ExprDelete(db, pItem->pExpr); - sqlite3DbFree(db, pItem->zEName); + if( pItem->zEName ) sqlite3DbNNFreeNN(db, pItem->zEName); pItem++; }while( --i>0 ); - sqlite3DbFreeNN(db, pList); + sqlite3DbNNFreeNN(db, pList); } SQLITE_PRIVATE void sqlite3ExprListDelete(sqlite3 *db, ExprList *pList){ if( pList ) exprListDeleteNN(db, pList); } +SQLITE_PRIVATE void sqlite3ExprListDeleteGeneric(sqlite3 *db, void *pList){ + if( ALWAYS(pList) ) exprListDeleteNN(db, (ExprList*)pList); +} /* ** Return the bitwise-OR of all Expr.flags fields in the given @@ -110960,7 +116131,7 @@ SQLITE_PRIVATE int sqlite3ExprIdToTrueFalse(Expr *pExpr){ ** and 0 if it is FALSE. */ SQLITE_PRIVATE int sqlite3ExprTruthValue(const Expr *pExpr){ - pExpr = sqlite3ExprSkipCollate((Expr*)pExpr); + pExpr = sqlite3ExprSkipCollateAndLikely((Expr*)pExpr); assert( pExpr->op==TK_TRUEFALSE ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); assert( sqlite3StrICmp(pExpr->u.zToken,"true")==0 @@ -111147,12 +116318,17 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstant(Expr *p, int iCur){ } /* -** Check pExpr to see if it is an invariant constraint on data source pSrc. +** Check pExpr to see if it is a constraint on the single data source +** pSrc = &pSrcList->a[iSrc]. In other words, check to see if pExpr +** constrains pSrc but does not depend on any other tables or data +** sources anywhere else in the query. Return true (non-zero) if pExpr +** is a constraint on pSrc only. +** ** This is an optimization. False negatives will perhaps cause slower ** queries, but false positives will yield incorrect answers. So when in ** doubt, return 0. ** -** To be an invariant constraint, the following must be true: +** To be a single-source constraint, the following must be true: ** ** (1) pExpr cannot refer to any table other than pSrc->iCursor. ** ** (2) pExpr cannot use subqueries or non-deterministic functions. ** ** (3) pSrc cannot be part of the left operand for a RIGHT JOIN. ** (Is there some way to relax this constraint?) ** ** (4) If pSrc is the right operand of a LEFT JOIN, then... ** (4a) pExpr must come from an ON clause.. +** (4b) and specifically the ON clause associated with the LEFT JOIN. ** ** (5) If pSrc is not the right operand of a LEFT JOIN or the left ** operand of a RIGHT JOIN, then pExpr must be from the WHERE ** clause, not an ON clause. +** +** (6) Either: +** +** (6a) pExpr does not originate in an ON or USING clause, or +** +** (6b) The ON or USING clause from which pExpr is derived is +** not to the left of a RIGHT JOIN (or FULL JOIN). +** +** Without this restriction, accepting pExpr as a single-table +** constraint might move the ON/USING filter expression +** from the left side of a RIGHT JOIN over to the right side, +** which leads to incorrect answers. See also restriction (9) +** on push-down. */ -SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc){ +SQLITE_PRIVATE int sqlite3ExprIsSingleTableConstraint( + Expr *pExpr, /* The constraint */ + const SrcList *pSrcList, /* Complete FROM clause */ + int iSrc /* Which element of pSrcList to use */ +){ + const SrcItem *pSrc = &pSrcList->a[iSrc]; if( pSrc->fg.jointype & JT_LTORJ ){ return 0; /* rule (3) */ } @@ -111179,6 +116373,19 @@ SQLITE_PRIVATE int sqlite3ExprIsTableConstraint(Expr *pExpr, const SrcItem *pSrc }else{ if( ExprHasProperty(pExpr, EP_OuterON) ) return 0; /* rule (5) */ } + if( ExprHasProperty(pExpr, EP_OuterON|EP_InnerON) /* (6a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (6b) */ + ){ + int jj; + for(jj=0; jj<iSrc; jj++){ + if( pExpr->w.iJoin==pSrcList->a[jj].iCursor ){ + if( (pSrcList->a[jj].fg.jointype & JT_LTORJ)!=0 ){ + return 0; /* restriction (6) */ + } + break; + } + } + } return sqlite3ExprIsTableConstant(pExpr, pSrc->iCursor); /* rules (1), (2) */ } @@ -111354,10 +116561,14 @@ SQLITE_PRIVATE int sqlite3ExprCanBeNull(const Expr *p){ return 0; case TK_COLUMN: assert( ExprUseYTab(p) ); - return ExprHasProperty(p, EP_CanBeNull) || - p->y.pTab==0 || /* Reference to column of index on expression */ - (p->iColumn>=0 + return ExprHasProperty(p, EP_CanBeNull) + || NEVER(p->y.pTab==0) /* Reference to column of index on expr */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + || (p->iColumn==XN_ROWID && IsView(p->y.pTab)) +#endif + || (p->iColumn>=0 && p->y.pTab->aCol!=0 /* Possible due to prior error */ + && ALWAYS(p->iColumn<p->y.pTab->nCol) && p->y.pTab->aCol[p->iColumn].notNull==0); default: return 1; @@ -111417,11 +116628,32 @@ SQLITE_PRIVATE int sqlite3IsRowid(const char *z){ return 0; } +/* +** Return a pointer to a buffer containing a usable rowid alias for table +** pTab. An alias is usable if there is not an explicit user-defined column +** of the same name. +*/ +SQLITE_PRIVATE const char *sqlite3RowidAlias(Table *pTab){ + const char *azOpt[] = {"_ROWID_", "ROWID", "OID"}; + int ii; + assert( VisibleRowid(pTab) ); + for(ii=0; ii<ArraySize(azOpt); ii++){ + int iCol; + for(iCol=0; iCol<pTab->nCol; iCol++){ + if( sqlite3_stricmp(azOpt[ii], pTab->aCol[iCol].zCnName)==0 ) break; + } + if( iCol==pTab->nCol ){ + return azOpt[ii]; + } + } + return 0; +} + /* ** pX is the RHS of an IN operator. If pX is a SELECT statement ** that can be simplified to a direct table access, then return ** a pointer to the SELECT statement. If pX is not a SELECT statement, -** or if the SELECT statement needs to be manifested into a transient +** or if the SELECT statement needs to be materialized into a transient ** table, then return NULL. */ #ifndef SQLITE_OMIT_SUBQUERY @@ -111517,7 +116749,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){ ** IN_INDEX_INDEX_ASC - The cursor was opened on an ascending index. 
** IN_INDEX_INDEX_DESC - The cursor was opened on a descending index. ** IN_INDEX_EPH - The cursor was opened on a specially created and -** populated epheremal table. +** populated ephemeral table. ** IN_INDEX_NOOP - No cursor was allocated. The IN operator must be ** implemented as a sequence of comparisons. ** @@ -111530,7 +116762,7 @@ static int sqlite3InRhsIsConstant(Expr *pIn){ ** an ephemeral table might need to be generated from the RHS and then ** pX->iTable made to point to the ephemeral table instead of an ** existing table. In this case, the creation and initialization of the -** ephmeral table might be put inside of a subroutine, the EP_Subrtn flag +** ephemeral table might be put inside of a subroutine, the EP_Subrtn flag ** will be set on pX and the pX->y.sub fields will be set to show where ** the subroutine is coded. ** @@ -111542,12 +116774,12 @@ static int sqlite3InRhsIsConstant(Expr *pIn){ ** ** When IN_INDEX_LOOP is used (and the b-tree will be used to iterate ** through the set members) then the b-tree must not contain duplicates. -** An epheremal table will be created unless the selected columns are guaranteed +** An ephemeral table will be created unless the selected columns are guaranteed ** to be unique - either because it is an INTEGER PRIMARY KEY or due to ** a UNIQUE constraint or index. ** ** When IN_INDEX_MEMBERSHIP is used (and the b-tree will be used -** for fast set membership tests) then an epheremal table must +** for fast set membership tests) then an ephemeral table must ** be used unless is a single INTEGER PRIMARY KEY column or an ** index can be found with the specified as its left-most. ** @@ -111707,7 +116939,6 @@ SQLITE_PRIVATE int sqlite3FindInIndex( CollSeq *pReq = sqlite3BinaryCompareCollSeq(pParse, pLhs, pRhs); int j; - assert( pReq!=0 || pRhs->iColumn==XN_ROWID || pParse->nErr ); for(j=0; jaiColumn[j]!=pRhs->iColumn ) continue; assert( pIdx->azColl[j] ); @@ -111881,7 +117112,7 @@ SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){ ** x IN (SELECT a FROM b) -- IN operator with subquery on the right ** ** The pExpr parameter is the IN operator. The cursor number for the -** constructed ephermeral table is returned. The first time the ephemeral +** constructed ephemeral table is returned. The first time the ephemeral ** table is computed, the cursor number is also stored in pExpr->iTable, ** however the cursor number returned might not be the same, as it might ** have been duplicated using OP_OpenDup. @@ -112065,6 +117296,7 @@ SQLITE_PRIVATE void sqlite3CodeRhsOfIN( sqlite3VdbeChangeP4(v, addr, (void *)pKeyInfo, P4_KEYINFO); } if( addrOnce ){ + sqlite3VdbeAddOp1(v, OP_NullRow, iTab); sqlite3VdbeJumpHere(v, addrOnce); /* Subroutine return */ assert( ExprUseYSub(pExpr) ); @@ -112100,6 +117332,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ SelectDest dest; /* How to deal with SELECT result */ int nReg; /* Registers to allocate */ Expr *pLimit; /* New limit expression */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; /* Address of OP_Explain instruction */ +#endif Vdbe *v = pParse->pVdbe; assert( v!=0 ); @@ -112152,8 +117387,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ ** In both cases, the query is augmented with "LIMIT 1". Any ** preexisting limit is discarded in place of the new LIMIT 1. 
*/ - ExplainQueryPlan((pParse, 1, "%sSCALAR SUBQUERY %d", + ExplainQueryPlan2(addrExplain, (pParse, 1, "%sSCALAR SUBQUERY %d", addrOnce?"":"CORRELATED ", pSel->selId)); + sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, -1); nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1; sqlite3SelectDestInit(&dest, 0, pParse->nMem+1); pParse->nMem += nReg; @@ -112178,7 +117414,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ pLimit = sqlite3PExpr(pParse, TK_NE, sqlite3ExprDup(db, pSel->pLimit->pLeft, 0), pLimit); } - sqlite3ExprDelete(db, pSel->pLimit->pLeft); + sqlite3ExprDeferredDelete(pParse, pSel->pLimit->pLeft); pSel->pLimit->pLeft = pLimit; }else{ /* If there is no pre-existing limit add a limit of 1 */ @@ -112196,6 +117432,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){ if( addrOnce ){ sqlite3VdbeJumpHere(v, addrOnce); } + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); /* Subroutine return */ assert( ExprUseYSub(pExpr) ); @@ -112604,6 +117841,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn( ){ int iAddr; Vdbe *v = pParse->pVdbe; + int nErr = pParse->nErr; assert( v!=0 ); assert( pParse->iSelfTab!=0 ); if( pParse->iSelfTab>0 ){ @@ -112616,6 +117854,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn( sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1); } if( iAddr ) sqlite3VdbeJumpHere(v, iAddr); + if( pParse->nErr>nErr ) pParse->db->errByteOffset = -1; } #endif /* SQLITE_OMIT_GENERATED_COLUMNS */ @@ -112631,10 +117870,8 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable( ){ Column *pCol; assert( v!=0 ); - if( pTab==0 ){ - sqlite3VdbeAddOp3(v, OP_Column, iTabCur, iCol, regOut); - return; - } + assert( pTab!=0 ); + assert( iCol!=XN_EXPR ); if( iCol<0 || iCol==pTab->iPKey ){ sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut); VdbeComment((v, "%s.rowid", pTab->zName)); @@ -112690,10 +117927,13 @@ SQLITE_PRIVATE int sqlite3ExprCodeGetColumn( u8 p5 /* P5 value for OP_Column + FLAGS */ ){ assert( pParse->pVdbe!=0 ); + assert( (p5 & (OPFLAG_NOCHNG|OPFLAG_TYPEOFARG|OPFLAG_LENGTHARG))==p5 ); + assert( IsVirtual(pTab) || (p5 & OPFLAG_NOCHNG)==0 ); sqlite3ExprCodeGetColumnOfTable(pParse->pVdbe, pTab, iTable, iColumn, iReg); if( p5 ){ - VdbeOp *pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1); + VdbeOp *pOp = sqlite3VdbeGetLastOp(pParse->pVdbe); if( pOp->opcode==OP_Column ) pOp->p5 = p5; + if( pOp->opcode==OP_VColumn ) pOp->p5 = (p5 & OPFLAG_NOCHNG); } return iReg; } @@ -112722,7 +117962,7 @@ static void exprToRegister(Expr *pExpr, int iReg){ /* ** Evaluate an expression (either a vector or a scalar expression) and store -** the result in continguous temporary registers. Return the index of +** the result in contiguous temporary registers. Return the index of ** the first register used to store the result. ** ** If the returned result register is a temporary scalar, then also write @@ -112761,8 +118001,8 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){ ** so that a subsequent copy will not be merged into this one. 
*/ static void setDoNotMergeFlagOnCopy(Vdbe *v){ - if( sqlite3VdbeGetOp(v, -1)->opcode==OP_Copy ){ - sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergable */ + if( sqlite3VdbeGetLastOp(v)->opcode==OP_Copy ){ + sqlite3VdbeChangeP5(v, 1); /* Tag trailing OP_Copy as not mergeable */ } } @@ -112852,13 +118092,13 @@ static int exprCodeInlineFunction( } case INLINEFUNC_implies_nonnull_row: { - /* REsult of sqlite3ExprImpliesNonNullRow() */ + /* Result of sqlite3ExprImpliesNonNullRow() */ Expr *pA1; assert( nFarg==2 ); pA1 = pFarg->a[1].pExpr; if( pA1->op==TK_COLUMN ){ sqlite3VdbeAddOp2(v, OP_Integer, - sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable), + sqlite3ExprImpliesNonNullRow(pFarg->a[0].pExpr,pA1->iTable,1), target); }else{ sqlite3VdbeAddOp2(v, OP_Null, 0, target); @@ -112871,10 +118111,13 @@ static int exprCodeInlineFunction( ** the type affinity of the argument. This is used for testing of ** the SQLite type logic. */ - const char *azAff[] = { "blob", "text", "numeric", "integer", "real" }; + const char *azAff[] = { "blob", "text", "numeric", "integer", + "real", "flexnum" }; char aff; assert( nFarg==1 ); aff = sqlite3ExprAffinity(pFarg->a[0].pExpr); + assert( aff<=SQLITE_AFF_NONE + || (aff>=SQLITE_AFF_BLOB && aff<=SQLITE_AFF_FLEXNUM) ); sqlite3VdbeLoadString(v, target, (aff<=SQLITE_AFF_NONE) ? "none" : azAff[aff-SQLITE_AFF_BLOB]); break; @@ -112884,6 +118127,99 @@ static int exprCodeInlineFunction( return target; } +/* +** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr. +** If it is, then resolve the expression by reading from the index and +** return the register into which the value has been read. If pExpr is +** not an indexed expression, then return negative. +*/ +static SQLITE_NOINLINE int sqlite3IndexedExprLookup( + Parse *pParse, /* The parsing context */ + Expr *pExpr, /* The expression to potentially bypass */ + int target /* Where to store the result of the expression */ +){ + IndexedExpr *p; + Vdbe *v; + for(p=pParse->pIdxEpr; p; p=p->pIENext){ + u8 exprAff; + int iDataCur = p->iDataCur; + if( iDataCur<0 ) continue; + if( pParse->iSelfTab ){ + if( p->iDataCur!=pParse->iSelfTab-1 ) continue; + iDataCur = -1; + } + if( sqlite3ExprCompare(0, pExpr, p->pExpr, iDataCur)!=0 ) continue; + assert( p->aff>=SQLITE_AFF_BLOB && p->aff<=SQLITE_AFF_NUMERIC ); + exprAff = sqlite3ExprAffinity(pExpr); + if( (exprAff<=SQLITE_AFF_BLOB && p->aff!=SQLITE_AFF_BLOB) + || (exprAff==SQLITE_AFF_TEXT && p->aff!=SQLITE_AFF_TEXT) + || (exprAff>=SQLITE_AFF_NUMERIC && p->aff!=SQLITE_AFF_NUMERIC) + ){ + /* Affinity mismatch on a generated column */ + continue; + } + + v = pParse->pVdbe; + assert( v!=0 ); + if( p->bMaybeNullRow ){ + /* If the index is on a NULL row due to an outer join, then we + ** cannot extract the value from the index. The value must be + ** computed using the original expression. 
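Aside: sqlite3IndexedExprLookup() above resolves an expression by reading its precomputed value from an index on that expression. A sketch of the user-visible feature with illustrative names; whether the index is actually consulted is up to the query planner, so the EXPLAIN QUERY PLAN output is indicative only.

```c
/* An index on an expression lets queries that use the same expression
** read the precomputed value from the index. */
#include <sqlite3.h>
#include <stdio.h>

static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  int i;
  for(i=0; i<nCol; i++) printf("%s ", azVal[i] ? azVal[i] : "NULL");
  printf("\n");
  return 0;
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(a,b);"
    "CREATE INDEX t_ab ON t(a+b);"      /* index on an expression */
    "INSERT INTO t VALUES (1,2),(3,4);", 0, 0, 0);
  /* The plan should mention index t_ab being used for the a+b lookup. */
  sqlite3_exec(db, "EXPLAIN QUERY PLAN SELECT a+b FROM t WHERE a+b=7",
               print_row, 0, 0);
  sqlite3_close(db);
  return 0;
}
```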
*/ + int addr = sqlite3VdbeCurrentAddr(v); + sqlite3VdbeAddOp3(v, OP_IfNullRow, p->iIdxCur, addr+3, target); + VdbeCoverage(v); + sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target); + VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol)); + sqlite3VdbeGoto(v, 0); + p = pParse->pIdxEpr; + pParse->pIdxEpr = 0; + sqlite3ExprCode(pParse, pExpr, target); + pParse->pIdxEpr = p; + sqlite3VdbeJumpHere(v, addr+2); + }else{ + sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target); + VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol)); + } + return target; + } + return -1; /* Not found */ +} + + +/* +** Expresion pExpr is guaranteed to be a TK_COLUMN or equivalent. This +** function checks the Parse.pIdxPartExpr list to see if this column +** can be replaced with a constant value. If so, it generates code to +** put the constant value in a register (ideally, but not necessarily, +** register iTarget) and returns the register number. +** +** Or, if the TK_COLUMN cannot be replaced by a constant, zero is +** returned. +*/ +static int exprPartidxExprLookup(Parse *pParse, Expr *pExpr, int iTarget){ + IndexedExpr *p; + for(p=pParse->pIdxPartExpr; p; p=p->pIENext){ + if( pExpr->iColumn==p->iIdxCol && pExpr->iTable==p->iDataCur ){ + Vdbe *v = pParse->pVdbe; + int addr = 0; + int ret; + + if( p->bMaybeNullRow ){ + addr = sqlite3VdbeAddOp1(v, OP_IfNullRow, p->iIdxCur); + } + ret = sqlite3ExprCodeTarget(pParse, p->pExpr, iTarget); + sqlite3VdbeAddOp4(pParse->pVdbe, OP_Affinity, ret, 1, 0, + (const char*)&p->aff, 1); + if( addr ){ + sqlite3VdbeJumpHere(v, addr); + sqlite3VdbeChangeP3(v, addr, ret); + } + return ret; + } + } + return 0; +} + /* ** Generate code into the current Vdbe to evaluate the given @@ -112912,25 +118248,44 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) expr_code_doover: if( pExpr==0 ){ op = TK_NULL; + }else if( pParse->pIdxEpr!=0 + && !ExprHasProperty(pExpr, EP_Leaf) + && (r1 = sqlite3IndexedExprLookup(pParse, pExpr, target))>=0 + ){ + return r1; }else{ assert( !ExprHasVVAProperty(pExpr,EP_Immutable) ); op = pExpr->op; } + assert( op!=TK_ORDER ); switch( op ){ case TK_AGG_COLUMN: { AggInfo *pAggInfo = pExpr->pAggInfo; struct AggInfo_col *pCol; assert( pAggInfo!=0 ); - assert( pExpr->iAgg>=0 && pExpr->iAggnColumn ); + assert( pExpr->iAgg>=0 ); + if( pExpr->iAgg>=pAggInfo->nColumn ){ + /* Happens when the left table of a RIGHT JOIN is null and + ** is using an expression index */ + sqlite3VdbeAddOp2(v, OP_Null, 0, target); +#ifdef SQLITE_VDBE_COVERAGE + /* Verify that the OP_Null above is exercised by tests + ** tag-20230325-2 */ + sqlite3VdbeAddOp3(v, OP_NotNull, target, 1, 20230325); + VdbeCoverageNeverTaken(v); +#endif + break; + } pCol = &pAggInfo->aCol[pExpr->iAgg]; if( !pAggInfo->directMode ){ - assert( pCol->iMem>0 ); - return pCol->iMem; + return AggInfoColumnReg(pAggInfo, pExpr->iAgg); }else if( pAggInfo->useSortingIdx ){ Table *pTab = pCol->pTab; sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab, pCol->iSorterColumn, target); - if( pCol->iColumn<0 ){ + if( pTab==0 ){ + /* No comment added */ + }else if( pCol->iColumn<0 ){ VdbeComment((v,"%s.rowid",pTab->zName)); }else{ VdbeComment((v,"%s.%s", @@ -112940,6 +118295,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) } } return target; + }else if( pExpr->y.pTab==0 ){ + /* This case happens when the argument to an aggregate function + ** is rewritten by aggregateConvertIndexedExprRefToColumn() */ + 
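Aside: exprPartidxExprLookup() above substitutes a constant for a column that a partial index's WHERE clause pins to a fixed value. A speculative sketch of a schema that could exercise it (invented names; the substitution is planner-internal, so the query result is simply unchanged either way):

```c
/* With CREATE INDEX ... WHERE a=5, any use of column "a" in a query
** planned against that index is known to equal 5, so the planner may
** return the constant instead of reading the table row. */
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(a,b);"
    "CREATE INDEX t_b ON t(b) WHERE a=5;"   /* partial index */
    "INSERT INTO t VALUES (5,10),(6,20);", 0, 0, 0);
  /* This query can be satisfied by t_b alone. */
  sqlite3_exec(db, "SELECT a, b FROM t WHERE a=5 AND b>0", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}
```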
sqlite3VdbeAddOp3(v, OP_Column, pExpr->iTable, pExpr->iColumn, target); + return target; } /* Otherwise, fall thru into the TK_COLUMN case */ /* no break */ deliberate_fall_through @@ -112950,20 +118310,17 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( ExprHasProperty(pExpr, EP_FixedCol) ){ /* This COLUMN expression is really a constant due to WHERE clause ** constraints, and that constant is coded by the pExpr->pLeft - ** expresssion. However, make sure the constant has the correct + ** expression. However, make sure the constant has the correct ** datatype by applying the Affinity of the table column to the ** constant. */ int aff; iReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft,target); assert( ExprUseYTab(pExpr) ); - if( pExpr->y.pTab ){ - aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); - }else{ - aff = pExpr->affExpr; - } + assert( pExpr->y.pTab!=0 ); + aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn); if( aff>SQLITE_AFF_BLOB ){ - static const char zAff[] = "B\000C\000D\000E"; + static const char zAff[] = "B\000C\000D\000E\000F"; assert( SQLITE_AFF_BLOB=='A' ); assert( SQLITE_AFF_TEXT=='B' ); sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0, @@ -113022,13 +118379,16 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) iTab = pParse->iSelfTab - 1; } } + else if( pParse->pIdxPartExpr + && 0!=(r1 = exprPartidxExprLookup(pParse, pExpr, target)) + ){ + return r1; + } assert( ExprUseYTab(pExpr) ); + assert( pExpr->y.pTab!=0 ); iReg = sqlite3ExprCodeGetColumn(pParse, pExpr->y.pTab, pExpr->iColumn, iTab, target, pExpr->op2); - if( pExpr->y.pTab==0 && pExpr->affExpr==SQLITE_AFF_REAL ){ - sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); - } return iReg; } case TK_INTEGER: { @@ -113095,11 +118455,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) #ifndef SQLITE_OMIT_CAST case TK_CAST: { /* Expressions of the form: CAST(pLeft AS token) */ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( inReg==target ); assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3VdbeAddOp2(v, OP_Cast, target, sqlite3AffinityType(pExpr->u.zToken, 0)); @@ -113242,7 +118599,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) assert( !ExprHasProperty(pExpr, EP_IntValue) ); sqlite3ErrorMsg(pParse, "misuse of aggregate: %#T()", pExpr); }else{ - return pInfo->aFunc[pExpr->iAgg].iMem; + return AggInfoFuncReg(pInfo, pExpr->iAgg); } break; } @@ -113284,7 +118641,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) sqlite3ErrorMsg(pParse, "unknown function: %#T()", pExpr); break; } - if( pDef->funcFlags & SQLITE_FUNC_INLINE ){ + if( (pDef->funcFlags & SQLITE_FUNC_INLINE)!=0 && ALWAYS(pFarg!=0) ){ assert( (pDef->funcFlags & SQLITE_FUNC_UNSAFE)==0 ); assert( (pDef->funcFlags & SQLITE_FUNC_DIRECT)==0 ); return exprCodeInlineFunction(pParse, pFarg, @@ -113310,10 +118667,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) r1 = sqlite3GetTempRange(pParse, nFarg); } - /* For length() and typeof() functions with a column argument, + /* For length() and typeof() and octet_length() functions, ** set the P5 parameter to the OP_Column opcode to OPFLAG_LENGTHARG - ** or OPFLAG_TYPEOFARG respectively, to avoid unnecessary data - ** loading. 
+ ** or OPFLAG_TYPEOFARG or OPFLAG_BYTELENARG respectively, to avoid + ** unnecessary data loading. */ if( (pDef->funcFlags & (SQLITE_FUNC_LENGTH|SQLITE_FUNC_TYPEOF))!=0 ){ u8 exprOp; @@ -113323,14 +118680,16 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) if( exprOp==TK_COLUMN || exprOp==TK_AGG_COLUMN ){ assert( SQLITE_FUNC_LENGTH==OPFLAG_LENGTHARG ); assert( SQLITE_FUNC_TYPEOF==OPFLAG_TYPEOFARG ); - testcase( pDef->funcFlags & OPFLAG_LENGTHARG ); - pFarg->a[0].pExpr->op2 = - pDef->funcFlags & (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG); + assert( SQLITE_FUNC_BYTELEN==OPFLAG_BYTELENARG ); + assert( (OPFLAG_LENGTHARG|OPFLAG_TYPEOFARG)==OPFLAG_BYTELENARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_LENGTHARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_TYPEOFARG ); + testcase( (pDef->funcFlags & OPFLAG_BYTELENARG)==OPFLAG_BYTELENARG); + pFarg->a[0].pExpr->op2 = pDef->funcFlags & OPFLAG_BYTELENARG; } } - sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, - SQLITE_ECEL_DUP|SQLITE_ECEL_FACTOR); + sqlite3ExprCodeExprList(pParse, pFarg, r1, 0, SQLITE_ECEL_FACTOR); }else{ r1 = 0; } @@ -113431,17 +118790,16 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) return target; } case TK_COLLATE: { - if( !ExprHasProperty(pExpr, EP_Collate) - && ALWAYS(pExpr->pLeft) - && pExpr->pLeft->op==TK_FUNCTION - ){ - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); - if( inReg!=target ){ - sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target); - inReg = target; - } - sqlite3VdbeAddOp1(v, OP_ClrSubtype, inReg); - return inReg; + if( !ExprHasProperty(pExpr, EP_Collate) ){ + /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called + ** "SOFT-COLLATE" that is added to constraints that are pushed down + ** from outer queries into sub-queries by the push-down optimization. + ** Clear subtypes as subtypes may not cross a subquery boundary. + */ + assert( pExpr->pLeft ); + sqlite3ExprCode(pParse, pExpr->pLeft, target); + sqlite3VdbeAddOp1(v, OP_ClrSubtype, target); + return target; }else{ pExpr = pExpr->pLeft; goto expr_code_doover; /* 2018-04-28: Prevent deep recursion. */ @@ -113527,16 +118885,34 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) case TK_IF_NULL_ROW: { int addrINR; u8 okConstFactor = pParse->okConstFactor; - addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable); - /* Temporarily disable factoring of constant expressions, since - ** even though expressions may appear to be constant, they are not - ** really constant because they originate from the right-hand side - ** of a LEFT JOIN. */ - pParse->okConstFactor = 0; - inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target); + AggInfo *pAggInfo = pExpr->pAggInfo; + if( pAggInfo ){ + assert( pExpr->iAgg>=0 && pExpr->iAggnColumn ); + if( !pAggInfo->directMode ){ + inReg = AggInfoColumnReg(pAggInfo, pExpr->iAgg); + break; + } + if( pExpr->pAggInfo->useSortingIdx ){ + sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab, + pAggInfo->aCol[pExpr->iAgg].iSorterColumn, + target); + inReg = target; + break; + } + } + addrINR = sqlite3VdbeAddOp3(v, OP_IfNullRow, pExpr->iTable, 0, target); + /* The OP_IfNullRow opcode above can overwrite the result register with + ** NULL. So we have to ensure that the result register is not a value + ** that is suppose to be a constant. 
Two defenses are needed: + ** (1) Temporarily disable factoring of constant expressions + ** (2) Make sure the computed value really is stored in register + ** "target" and not someplace else. + */ + pParse->okConstFactor = 0; /* note (1) above */ + sqlite3ExprCode(pParse, pExpr->pLeft, target); + assert( target==inReg ); pParse->okConstFactor = okConstFactor; sqlite3VdbeJumpHere(v, addrINR); - sqlite3VdbeChangeP3(v, addrINR, inReg); break; } @@ -113668,9 +119044,9 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target) ** once. If no functions are involved, then factor the code out and put it at ** the end of the prepared statement in the initialization section. ** -** If regDest>=0 then the result is always stored in that register and the +** If regDest>0 then the result is always stored in that register and the ** result is not reusable. If regDest<0 then this routine is free to -** store the value whereever it wants. The register where the expression +** store the value wherever it wants. The register where the expression ** is stored is returned. When regDest<0, two identical expressions might ** code to the same register, if they do not contain function calls and hence ** are factored out into the initialization section at the end of the @@ -113683,6 +119059,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeRunJustOnce( ){ ExprList *p; assert( ConstFactorOk(pParse) ); + assert( regDest!=0 ); p = pParse->pConstExpr; if( regDest<0 && p ){ struct ExprList_item *pItem; @@ -113773,7 +119150,11 @@ SQLITE_PRIVATE void sqlite3ExprCode(Parse *pParse, Expr *pExpr, int target){ inReg = sqlite3ExprCodeTarget(pParse, pExpr, target); if( inReg!=target ){ u8 op; - if( ALWAYS(pExpr) && ExprHasProperty(pExpr,EP_Subquery) ){ + Expr *pX = sqlite3ExprSkipCollateAndLikely(pExpr); + testcase( pX!=pExpr ); + if( ALWAYS(pX) + && (ExprHasProperty(pX,EP_Subquery) || pX->op==TK_REGISTER) + ){ op = OP_Copy; }else{ op = OP_SCopy; @@ -113868,7 +119249,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList( if( inReg!=target+i ){ VdbeOp *pOp; if( copyOp==OP_Copy - && (pOp=sqlite3VdbeGetOp(v, -1))->opcode==OP_Copy + && (pOp=sqlite3VdbeGetLastOp(v))->opcode==OP_Copy && pOp->p1+pOp->p3+1==inReg && pOp->p2+pOp->p3+1==target+i && pOp->p5==0 /* The do-not-merge flag must be clear */ @@ -114067,6 +119448,7 @@ SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse *pParse, Expr *pExpr, int dest, int assert( TK_ISNULL==OP_IsNull ); testcase( op==TK_ISNULL ); assert( TK_NOTNULL==OP_NotNull ); testcase( op==TK_NOTNULL ); r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); VdbeCoverageIf(v, op==TK_ISNULL); VdbeCoverageIf(v, op==TK_NOTNULL); @@ -114241,6 +119623,7 @@ SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse *pParse, Expr *pExpr, int dest, int case TK_ISNULL: case TK_NOTNULL: { r1 = sqlite3ExprCodeTemp(pParse, pExpr->pLeft, ®Free1); + sqlite3VdbeTypeofColumn(v, r1); sqlite3VdbeAddOp2(v, op, r1, dest); testcase( op==TK_ISNULL ); VdbeCoverageIf(v, op==TK_ISNULL); testcase( op==TK_NOTNULL ); VdbeCoverageIf(v, op==TK_NOTNULL); @@ -114394,7 +119777,13 @@ SQLITE_PRIVATE int sqlite3ExprCompare( if( pB->op==TK_COLLATE && sqlite3ExprCompare(pParse, pA,pB->pLeft,iTab)<2 ){ return 1; } - return 2; + if( pA->op==TK_AGG_COLUMN && pB->op==TK_COLUMN + && pB->iTable<0 && pA->iTable==iTab + ){ + /* fall through */ + }else{ + return 2; + } } assert( !ExprHasProperty(pA, EP_IntValue) ); assert( !ExprHasProperty(pB, EP_IntValue) ); @@ -114484,8 +119873,8 @@ SQLITE_PRIVATE 
int sqlite3ExprListCompare(const ExprList *pA, const ExprList *pB */ SQLITE_PRIVATE int sqlite3ExprCompareSkip(Expr *pA,Expr *pB, int iTab){ return sqlite3ExprCompare(0, - sqlite3ExprSkipCollateAndLikely(pA), - sqlite3ExprSkipCollateAndLikely(pB), + sqlite3ExprSkipCollate(pA), + sqlite3ExprSkipCollate(pB), iTab); } @@ -114578,7 +119967,7 @@ static int exprImpliesNotNull( ** pE1: x!=123 pE2: x IS NOT NULL Result: true ** pE1: x!=?1 pE2: x IS NOT NULL Result: true ** pE1: x IS NULL pE2: x IS NOT NULL Result: false -** pE1: x IS ?2 pE2: x IS NOT NULL Reuslt: false +** pE1: x IS ?2 pE2: x IS NOT NULL Result: false ** ** When comparing TK_COLUMN nodes between pE1 and pE2, if pE2 has ** Expr.iTable<0 then assume a table number given by iTab. @@ -114615,11 +120004,29 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr( return 0; } +/* This is a helper function to impliesNotNullRow(). In this routine, +** set pWalker->eCode to one only if *both* of the input expressions +** separately have the implies-not-null-row property. +*/ +static void bothImplyNotNullRow(Walker *pWalker, Expr *pE1, Expr *pE2){ + if( pWalker->eCode==0 ){ + sqlite3WalkExpr(pWalker, pE1); + if( pWalker->eCode ){ + pWalker->eCode = 0; + sqlite3WalkExpr(pWalker, pE2); + } + } +} + /* ** This is the Expr node callback for sqlite3ExprImpliesNonNullRow(). ** If the expression node requires that the table at pWalker->iCur ** have one or more non-NULL column, then set pWalker->eCode to 1 and abort. ** +** pWalker->mWFlags is non-zero if this inquiry is being undertaking on +** behalf of a RIGHT JOIN (or FULL JOIN). That makes a difference when +** evaluating terms in the ON clause of an inner join. +** ** This routine controls an optimization. False positives (setting ** pWalker->eCode to 1 when it should not be) are deadly, but false-negatives ** (never setting pWalker->eCode) is a harmless missed optimization. @@ -114628,28 +120035,33 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ testcase( pExpr->op==TK_AGG_COLUMN ); testcase( pExpr->op==TK_AGG_FUNCTION ); if( ExprHasProperty(pExpr, EP_OuterON) ) return WRC_Prune; + if( ExprHasProperty(pExpr, EP_InnerON) && pWalker->mWFlags ){ + /* If iCur is used in an inner-join ON clause to the left of a + ** RIGHT JOIN, that does *not* mean that the table must be non-null. + ** But it is difficult to check for that condition precisely. + ** To keep things simple, any use of iCur from any inner-join is + ** ignored while attempting to simplify a RIGHT JOIN. */ + return WRC_Prune; + } switch( pExpr->op ){ case TK_ISNOT: case TK_ISNULL: case TK_NOTNULL: case TK_IS: - case TK_OR: case TK_VECTOR: - case TK_CASE: - case TK_IN: case TK_FUNCTION: case TK_TRUTH: + case TK_CASE: testcase( pExpr->op==TK_ISNOT ); testcase( pExpr->op==TK_ISNULL ); testcase( pExpr->op==TK_NOTNULL ); testcase( pExpr->op==TK_IS ); - testcase( pExpr->op==TK_OR ); testcase( pExpr->op==TK_VECTOR ); - testcase( pExpr->op==TK_CASE ); - testcase( pExpr->op==TK_IN ); testcase( pExpr->op==TK_FUNCTION ); testcase( pExpr->op==TK_TRUTH ); + testcase( pExpr->op==TK_CASE ); return WRC_Prune; + case TK_COLUMN: if( pWalker->u.iCur==pExpr->iTable ){ pWalker->eCode = 1; @@ -114657,21 +120069,38 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ } return WRC_Prune; + case TK_OR: case TK_AND: - if( pWalker->eCode==0 ){ + /* Both sides of an AND or OR must separately imply non-null-row. + ** Consider these cases: + ** 1. NOT (x AND y) + ** 2. 
x OR y + ** If only one of x or y is non-null-row, then the overall expression + ** can be true if the other arm is false (case 1) or true (case 2). + */ + testcase( pExpr->op==TK_OR ); + testcase( pExpr->op==TK_AND ); + bothImplyNotNullRow(pWalker, pExpr->pLeft, pExpr->pRight); + return WRC_Prune; + + case TK_IN: + /* Beware of "x NOT IN ()" and "x NOT IN (SELECT 1 WHERE false)", + ** both of which can be true. But apart from these cases, if + ** the left-hand side of the IN is NULL then the IN itself will be + ** NULL. */ + if( ExprUseXList(pExpr) && ALWAYS(pExpr->x.pList->nExpr>0) ){ sqlite3WalkExpr(pWalker, pExpr->pLeft); - if( pWalker->eCode ){ - pWalker->eCode = 0; - sqlite3WalkExpr(pWalker, pExpr->pRight); - } } return WRC_Prune; case TK_BETWEEN: - if( sqlite3WalkExpr(pWalker, pExpr->pLeft)==WRC_Abort ){ - assert( pWalker->eCode ); - return WRC_Abort; - } + /* In "x NOT BETWEEN y AND z" either x must be non-null-row or else + ** both y and z must be non-null row */ + assert( ExprUseXList(pExpr) ); + assert( pExpr->x.pList->nExpr==2 ); + sqlite3WalkExpr(pWalker, pExpr->pLeft); + bothImplyNotNullRow(pWalker, pExpr->x.pList->a[0].pExpr, + pExpr->x.pList->a[1].pExpr); return WRC_Prune; /* Virtual tables are allowed to use constraints like x=NULL. So @@ -114696,10 +120125,10 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); assert( pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); if( (pLeft->op==TK_COLUMN - && pLeft->y.pTab!=0 + && ALWAYS(pLeft->y.pTab!=0) && IsVirtual(pLeft->y.pTab)) || (pRight->op==TK_COLUMN - && pRight->y.pTab!=0 + && ALWAYS(pRight->y.pTab!=0) && IsVirtual(pRight->y.pTab)) ){ return WRC_Prune; @@ -114733,7 +120162,7 @@ static int impliesNotNullRow(Walker *pWalker, Expr *pExpr){ ** be non-NULL, then the LEFT JOIN can be safely converted into an ** ordinary join. */ -SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){ +SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab, int isRJ){ Walker w; p = sqlite3ExprSkipCollateAndLikely(p); if( p==0 ) return 0; @@ -114741,7 +120170,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){ p = p->pLeft; }else{ while( p->op==TK_AND ){ - if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab) ) return 1; + if( sqlite3ExprImpliesNonNullRow(p->pLeft, iTab, isRJ) ) return 1; p = p->pRight; } } @@ -114749,6 +120178,7 @@ SQLITE_PRIVATE int sqlite3ExprImpliesNonNullRow(Expr *p, int iTab){ w.xSelectCallback = 0; w.xSelectCallback2 = 0; w.eCode = 0; + w.mWFlags = isRJ!=0; w.u.iCur = iTab; sqlite3WalkExpr(&w, p); return w.eCode; @@ -114809,7 +120239,7 @@ SQLITE_PRIVATE int sqlite3ExprCoveredByIndex( } -/* Structure used to pass information throught the Walker in order to +/* Structure used to pass information throughout the Walker in order to ** implement sqlite3ReferencesSrcList(). 
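Aside: the TK_IN case above must be careful because an IN operator with an empty right-hand side has a fixed truth value regardless of a NULL left operand. SQLite accepts an empty IN list as an extension, so this is easy to demonstrate with the public API:

```c
/* "x NOT IN ()" is true even when x is NULL, which is why an IN with an
** empty RHS cannot be used to infer a non-NULL row. */
#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_prepare_v2(db, "SELECT NULL NOT IN ()", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%d\n", sqlite3_column_int(pStmt, 0));  /* prints 1 */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
```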
*/ struct RefSrcList { @@ -114904,6 +120334,7 @@ static int exprRefToSrcList(Walker *pWalker, Expr *pExpr){ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList *pSrcList){ Walker w; struct RefSrcList x; + assert( pParse->db!=0 ); memset(&w, 0, sizeof(w)); memset(&x, 0, sizeof(x)); w.xExprCallback = exprRefToSrcList; @@ -114915,12 +120346,18 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList assert( pExpr->op==TK_AGG_FUNCTION ); assert( ExprUseXList(pExpr) ); sqlite3WalkExprList(&w, pExpr->x.pList); + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + assert( pExpr->pLeft->x.pList!=0 ); + sqlite3WalkExprList(&w, pExpr->pLeft->x.pList); + } #ifndef SQLITE_OMIT_WINDOWFUNC if( ExprHasProperty(pExpr, EP_WinFunc) ){ sqlite3WalkExpr(&w, pExpr->y.pWin->pFilter); } #endif - sqlite3DbFree(pParse->db, x.aiExclude); + if( x.aiExclude ) sqlite3DbNNFreeNN(pParse->db, x.aiExclude); if( w.eCode & 0x01 ){ return 1; }else if( w.eCode ){ @@ -114938,10 +120375,8 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList ** it does, make a copy. This is done because the pExpr argument is ** subject to change. ** -** The copy is stored on pParse->pConstExpr with a register number of 0. -** This will cause the expression to be deleted automatically when the -** Parse object is destroyed, but the zero register number means that it -** will not generate any code in the preamble. +** The copy is scheduled for deletion using the sqlite3ExprDeferredDelete() +** which builds on the sqlite3ParserAddCleanup() mechanism. */ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ if( ALWAYS(!ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced)) @@ -114951,10 +120386,11 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ int iAgg = pExpr->iAgg; Parse *pParse = pWalker->pParse; sqlite3 *db = pParse->db; - assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_AGG_FUNCTION ); - if( pExpr->op==TK_AGG_COLUMN ){ - assert( iAgg>=0 && iAggnColumn ); - if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){ + assert( iAgg>=0 ); + if( pExpr->op!=TK_AGG_FUNCTION ){ + if( iAggnColumn + && pAggInfo->aCol[iAgg].pCExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aCol[iAgg].pCExpr = pExpr; @@ -114962,8 +120398,10 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){ } } }else{ - assert( iAgg>=0 && iAggnFunc ); - if( pAggInfo->aFunc[iAgg].pFExpr==pExpr ){ + assert( pExpr->op==TK_AGG_FUNCTION ); + if( ALWAYS(iAggnFunc) + && pAggInfo->aFunc[iAgg].pFExpr==pExpr + ){ pExpr = sqlite3ExprDup(db, pExpr, 0); if( pExpr ){ pAggInfo->aFunc[iAgg].pFExpr = pExpr; @@ -115018,6 +120456,74 @@ static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){ return i; } +/* +** Search the AggInfo object for an aCol[] entry that has iTable and iColumn. +** Return the index in aCol[] of the entry that describes that column. +** +** If no prior entry is found, create a new one and return -1. The +** new column will have an index of pAggInfo->nColumn-1. 
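Aside: the sqlite3ReferencesSrcList() hunk above begins walking an ORDER BY list attached to an aggregate's argument (the TK_ORDER node on pExpr->pLeft). A sketch of the SQL construct this supports; it assumes a build recent enough to accept ORDER BY inside aggregate arguments, which is what these hunks implement:

```c
/* ORDER BY inside an aggregate's argument list controls the order in
** which values are fed to the aggregate. */
#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t(x); INSERT INTO t VALUES ('b'),('c'),('a');", 0, 0, 0);
  sqlite3_prepare_v2(db, "SELECT group_concat(x ORDER BY x) FROM t",
                     -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0)); /* a,b,c */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
```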
+*/ +static void findOrCreateAggInfoColumn( + Parse *pParse, /* Parsing context */ + AggInfo *pAggInfo, /* The AggInfo object to search and/or modify */ + Expr *pExpr /* Expr describing the column to find or insert */ +){ + struct AggInfo_col *pCol; + int k; + + assert( pAggInfo->iFirstReg==0 ); + pCol = pAggInfo->aCol; + for(k=0; knColumn; k++, pCol++){ + if( pCol->pCExpr==pExpr ) return; + if( pCol->iTable==pExpr->iTable + && pCol->iColumn==pExpr->iColumn + && pExpr->op!=TK_IF_NULL_ROW + ){ + goto fix_up_expr; + } + } + k = addAggInfoColumn(pParse->db, pAggInfo); + if( k<0 ){ + /* OOM on resize */ + assert( pParse->db->mallocFailed ); + return; + } + pCol = &pAggInfo->aCol[k]; + assert( ExprUseYTab(pExpr) ); + pCol->pTab = pExpr->y.pTab; + pCol->iTable = pExpr->iTable; + pCol->iColumn = pExpr->iColumn; + pCol->iSorterColumn = -1; + pCol->pCExpr = pExpr; + if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){ + int j, n; + ExprList *pGB = pAggInfo->pGroupBy; + struct ExprList_item *pTerm = pGB->a; + n = pGB->nExpr; + for(j=0; jpExpr; + if( pE->op==TK_COLUMN + && pE->iTable==pExpr->iTable + && pE->iColumn==pExpr->iColumn + ){ + pCol->iSorterColumn = j; + break; + } + } + } + if( pCol->iSorterColumn<0 ){ + pCol->iSorterColumn = pAggInfo->nSortingColumn++; + } +fix_up_expr: + ExprSetVVAProperty(pExpr, EP_NoReduce); + assert( pExpr->pAggInfo==0 || pExpr->pAggInfo==pAggInfo ); + pExpr->pAggInfo = pAggInfo; + if( pExpr->op==TK_COLUMN ){ + pExpr->op = TK_AGG_COLUMN; + } + pExpr->iAgg = (i16)k; +} + /* ** This is the xExprCallback for a tree walker. It is used to ** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates @@ -115031,87 +120537,76 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ AggInfo *pAggInfo = pNC->uNC.pAggInfo; assert( pNC->ncFlags & NC_UAggInfo ); + assert( pAggInfo->iFirstReg==0 ); switch( pExpr->op ){ + default: { + IndexedExpr *pIEpr; + Expr tmp; + assert( pParse->iSelfTab==0 ); + if( (pNC->ncFlags & NC_InAggFunc)==0 ) break; + if( pParse->pIdxEpr==0 ) break; + for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){ + int iDataCur = pIEpr->iDataCur; + if( iDataCur<0 ) continue; + if( sqlite3ExprCompare(0, pExpr, pIEpr->pExpr, iDataCur)==0 ) break; + } + if( pIEpr==0 ) break; + if( NEVER(!ExprUseYTab(pExpr)) ) break; + for(i=0; inSrc; i++){ + if( pSrcList->a[0].iCursor==pIEpr->iDataCur ) break; + } + if( i>=pSrcList->nSrc ) break; + if( NEVER(pExpr->pAggInfo!=0) ) break; /* Resolved by outer context */ + if( pParse->nErr ){ return WRC_Abort; } + + /* If we reach this point, it means that expression pExpr can be + ** translated into a reference to an index column as described by + ** pIEpr. 
+ */ + memset(&tmp, 0, sizeof(tmp)); + tmp.op = TK_AGG_COLUMN; + tmp.iTable = pIEpr->iIdxCur; + tmp.iColumn = pIEpr->iIdxCol; + findOrCreateAggInfoColumn(pParse, pAggInfo, &tmp); + if( pParse->nErr ){ return WRC_Abort; } + assert( pAggInfo->aCol!=0 ); + assert( tmp.iAggnColumn ); + pAggInfo->aCol[tmp.iAgg].pCExpr = pExpr; + pExpr->pAggInfo = pAggInfo; + pExpr->iAgg = tmp.iAgg; + return WRC_Prune; + } + case TK_IF_NULL_ROW: case TK_AGG_COLUMN: case TK_COLUMN: { testcase( pExpr->op==TK_AGG_COLUMN ); testcase( pExpr->op==TK_COLUMN ); + testcase( pExpr->op==TK_IF_NULL_ROW ); /* Check to see if the column is in one of the tables in the FROM ** clause of the aggregate query */ if( ALWAYS(pSrcList!=0) ){ SrcItem *pItem = pSrcList->a; for(i=0; inSrc; i++, pItem++){ - struct AggInfo_col *pCol; assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) ); if( pExpr->iTable==pItem->iCursor ){ - /* If we reach this point, it means that pExpr refers to a table - ** that is in the FROM clause of the aggregate query. - ** - ** Make an entry for the column in pAggInfo->aCol[] if there - ** is not an entry there already. - */ - int k; - pCol = pAggInfo->aCol; - for(k=0; knColumn; k++, pCol++){ - if( pCol->iTable==pExpr->iTable && - pCol->iColumn==pExpr->iColumn ){ - break; - } - } - if( (k>=pAggInfo->nColumn) - && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0 - ){ - pCol = &pAggInfo->aCol[k]; - assert( ExprUseYTab(pExpr) ); - pCol->pTab = pExpr->y.pTab; - pCol->iTable = pExpr->iTable; - pCol->iColumn = pExpr->iColumn; - pCol->iMem = ++pParse->nMem; - pCol->iSorterColumn = -1; - pCol->pCExpr = pExpr; - if( pAggInfo->pGroupBy ){ - int j, n; - ExprList *pGB = pAggInfo->pGroupBy; - struct ExprList_item *pTerm = pGB->a; - n = pGB->nExpr; - for(j=0; jpExpr; - if( pE->op==TK_COLUMN && pE->iTable==pExpr->iTable && - pE->iColumn==pExpr->iColumn ){ - pCol->iSorterColumn = j; - break; - } - } - } - if( pCol->iSorterColumn<0 ){ - pCol->iSorterColumn = pAggInfo->nSortingColumn++; - } - } - /* There is now an entry for pExpr in pAggInfo->aCol[] (either - ** because it was there before or because we just created it). - ** Convert the pExpr to be a TK_AGG_COLUMN referring to that - ** pAggInfo->aCol[] entry. - */ - ExprSetVVAProperty(pExpr, EP_NoReduce); - pExpr->pAggInfo = pAggInfo; - pExpr->op = TK_AGG_COLUMN; - pExpr->iAgg = (i16)k; + findOrCreateAggInfoColumn(pParse, pAggInfo, pExpr); break; } /* endif pExpr->iTable==pItem->iCursor */ } /* end loop over pSrcList */ } - return WRC_Prune; + return WRC_Continue; } case TK_AGG_FUNCTION: { if( (pNC->ncFlags & NC_InAggFunc)==0 && pWalker->walkerDepth==pExpr->op2 + && pExpr->pAggInfo==0 ){ /* Check to see if pExpr is a duplicate of another aggregate ** function that is already in the pAggInfo structure */ struct AggInfo_func *pItem = pAggInfo->aFunc; for(i=0; inFunc; i++, pItem++){ - if( pItem->pFExpr==pExpr ) break; + if( NEVER(pItem->pFExpr==pExpr) ) break; if( sqlite3ExprCompare(0, pItem->pFExpr, pExpr, -1)==0 ){ break; } @@ -115122,15 +120617,44 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){ u8 enc = ENC(pParse->db); i = addAggInfoFunc(pParse->db, pAggInfo); if( i>=0 ){ + int nArg; assert( !ExprHasProperty(pExpr, EP_xIsSelect) ); pItem = &pAggInfo->aFunc[i]; pItem->pFExpr = pExpr; - pItem->iMem = ++pParse->nMem; assert( ExprUseUToken(pExpr) ); + nArg = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; pItem->pFunc = sqlite3FindFunction(pParse->db, - pExpr->u.zToken, - pExpr->x.pList ? 
pExpr->x.pList->nExpr : 0, enc, 0); - if( pExpr->flags & EP_Distinct ){ + pExpr->u.zToken, nArg, enc, 0); + assert( pItem->bOBUnique==0 ); + if( pExpr->pLeft + && (pItem->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL)==0 + ){ + /* The NEEDCOLL test above causes any ORDER BY clause on + ** aggregate min() or max() to be ignored. */ + ExprList *pOBList; + assert( nArg>0 ); + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + pItem->iOBTab = pParse->nTab++; + pOBList = pExpr->pLeft->x.pList; + assert( pOBList->nExpr>0 ); + assert( pItem->bOBUnique==0 ); + if( pOBList->nExpr==1 + && nArg==1 + && sqlite3ExprCompare(0,pOBList->a[0].pExpr, + pExpr->x.pList->a[0].pExpr,0)==0 + ){ + pItem->bOBPayload = 0; + pItem->bOBUnique = ExprHasProperty(pExpr, EP_Distinct); + }else{ + pItem->bOBPayload = 1; + } + pItem->bUseSubtype = + (pItem->pFunc->funcFlags & SQLITE_SUBTYPE)!=0; + }else{ + pItem->iOBTab = -1; + } + if( ExprHasProperty(pExpr, EP_Distinct) && !pItem->bOBUnique ){ pItem->iDistinct = pParse->nTab++; }else{ pItem->iDistinct = -1; @@ -115254,6 +120778,37 @@ SQLITE_PRIVATE void sqlite3ClearTempRegCache(Parse *pParse){ pParse->nRangeReg = 0; } +/* +** Make sure sufficient registers have been allocated so that +** iReg is a valid register number. +*/ +SQLITE_PRIVATE void sqlite3TouchRegister(Parse *pParse, int iReg){ + if( pParse->nMemnMem = iReg; +} + +#if defined(SQLITE_ENABLE_STAT4) || defined(SQLITE_DEBUG) +/* +** Return the latest reusable register in the set of all registers. +** The value returned is no less than iMin. If any register iMin or +** greater is in permanent use, then return one more than that last +** permanent register. +*/ +SQLITE_PRIVATE int sqlite3FirstAvailableRegister(Parse *pParse, int iMin){ + const ExprList *pList = pParse->pConstExpr; + if( pList ){ + int i; + for(i=0; inExpr; i++){ + if( pList->a[i].u.iConstExprReg>=iMin ){ + iMin = pList->a[i].u.iConstExprReg + 1; + } + } + } + pParse->nTempReg = 0; + pParse->nRangeReg = 0; + return iMin; +} +#endif /* SQLITE_ENABLE_STAT4 || SQLITE_DEBUG */ + /* ** Validate that no temporary register falls within the range of ** iFirst..iLast, inclusive. 
This routine is only call from within assert() @@ -115273,6 +120828,14 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse *pParse, int iFirst, int iLast){ return 0; } } + if( pParse->pConstExpr ){ + ExprList *pList = pParse->pConstExpr; + for(i=0; inExpr; i++){ + int iReg = pList->a[i].u.iConstExprReg; + if( iReg==0 ) continue; + if( iReg>=iFirst && iReg<=iLast ) return 0; + } + } return 1; } #endif /* SQLITE_DEBUG */ @@ -115727,14 +121290,19 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){ /* Verify that constraints are still satisfied */ if( pNew->pCheck!=0 || (pCol->notNull && (pCol->colFlags & COLFLAG_GENERATED)!=0) + || (pTab->tabFlags & TF_Strict)!=0 ){ sqlite3NestedParse(pParse, "SELECT CASE WHEN quick_check GLOB 'CHECK*'" " THEN raise(ABORT,'CHECK constraint failed')" + " WHEN quick_check GLOB 'non-* value in*'" + " THEN raise(ABORT,'type mismatch on DEFAULT')" " ELSE raise(ABORT,'NOT NULL constraint failed')" " END" " FROM pragma_quick_check(%Q,%Q)" - " WHERE quick_check GLOB 'CHECK*' OR quick_check GLOB 'NULL*'", + " WHERE quick_check GLOB 'CHECK*'" + " OR quick_check GLOB 'NULL*'" + " OR quick_check GLOB 'non-* value in*'", zTab, zDb ); } @@ -115823,7 +121391,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){ pNew->u.tab.pDfltList = sqlite3ExprListDup(db, pTab->u.tab.pDfltList, 0); pNew->pSchema = db->aDb[iDb].pSchema; pNew->u.tab.addColOffset = pTab->u.tab.addColOffset; - pNew->nTabRef = 1; + assert( pNew->nTabRef==1 ); exit_begin_add_column: sqlite3SrcListDelete(db, pSrc); @@ -116022,13 +121590,14 @@ static void renameTokenCheckAll(Parse *pParse, const void *pPtr){ assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); if( pParse->nErr==0 ){ const RenameToken *p; - u8 i = 0; + u32 i = 1; for(p=pParse->pRename; p; p=p->pNext){ if( p->p ){ assert( p->p!=pPtr ); - i += *(u8*)(p->p); + i += *(u8*)(p->p) | 1; } } + assert( i>0 ); } } #else @@ -116327,7 +121896,7 @@ static RenameToken *renameColumnTokenNext(RenameCtx *pCtx){ } /* -** An error occured while parsing or otherwise processing a database +** An error occurred while parsing or otherwise processing a database ** object (either pParse->pNewTable, pNewIndex or pNewTrigger) as part of an ** ALTER TABLE RENAME COLUMN program. The error message emitted by the ** sub-routine is currently stored in pParse->zErrMsg. This function @@ -116559,6 +122128,19 @@ static int renameEditSql( return rc; } +/* +** Set all pEList->a[].fg.eEName fields in the expression-list to val. +*/ +static void renameSetENames(ExprList *pEList, int val){ + if( pEList ){ + int i; + for(i=0; inExpr; i++){ + assert( val==ENAME_NAME || pEList->a[i].fg.eEName==ENAME_NAME ); + pEList->a[i].fg.eEName = val; + } + } +} + /* ** Resolve all symbols in the trigger at pParse->pNewTrigger, assuming ** it was read from the schema of database zDb. Return SQLITE_OK if @@ -116606,7 +122188,17 @@ static int renameResolveTrigger(Parse *pParse){ pSrc = 0; rc = SQLITE_NOMEM; }else{ + /* pStep->pExprList contains an expression-list used for an UPDATE + ** statement. So the a[].zEName values are the RHS of the + ** "
  • = " clauses of the UPDATE statement. So, before + ** running SelectPrep(), change all the eEName values in + ** pStep->pExprList to ENAME_SPAN (from their current value of + ** ENAME_NAME). This is to prevent any ids in ON() clauses that are + ** part of pSrc from being incorrectly resolved against the + ** a[].zEName values as if they were column aliases. */ + renameSetENames(pStep->pExprList, ENAME_SPAN); sqlite3SelectPrep(pParse, pSel, 0); + renameSetENames(pStep->pExprList, ENAME_NAME); rc = pParse->nErr ? SQLITE_ERROR : SQLITE_OK; assert( pStep->pExprList==0 || pStep->pExprList==pSel->pEList ); assert( pSrc==pSel->pSrc ); @@ -117825,9 +123417,9 @@ static void openStatTable( typedef struct StatAccum StatAccum; typedef struct StatSample StatSample; struct StatSample { - tRowcnt *anEq; /* sqlite_stat4.nEq */ tRowcnt *anDLt; /* sqlite_stat4.nDLt */ #ifdef SQLITE_ENABLE_STAT4 + tRowcnt *anEq; /* sqlite_stat4.nEq */ tRowcnt *anLt; /* sqlite_stat4.nLt */ union { i64 iRowid; /* Rowid in main table of the key */ @@ -117985,9 +123577,9 @@ static void statInit( /* Allocate the space required for the StatAccum object */ n = sizeof(*p) - + sizeof(tRowcnt)*nColUp /* StatAccum.anEq */ - + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */ + + sizeof(tRowcnt)*nColUp; /* StatAccum.anDLt */ #ifdef SQLITE_ENABLE_STAT4 + n += sizeof(tRowcnt)*nColUp; /* StatAccum.anEq */ if( mxSample ){ n += sizeof(tRowcnt)*nColUp /* StatAccum.anLt */ + sizeof(StatSample)*(nCol+mxSample) /* StatAccum.aBest[], a[] */ @@ -118008,9 +123600,9 @@ static void statInit( p->nKeyCol = nKeyCol; p->nSkipAhead = 0; p->current.anDLt = (tRowcnt*)&p[1]; - p->current.anEq = &p->current.anDLt[nColUp]; #ifdef SQLITE_ENABLE_STAT4 + p->current.anEq = &p->current.anDLt[nColUp]; p->mxSample = p->nLimit==0 ? mxSample : 0; if( mxSample ){ u8 *pSpace; /* Allocated space not yet assigned */ @@ -118277,7 +123869,9 @@ static void statPush( if( p->nRow==0 ){ /* This is the first call to this function. Do initialization. */ +#ifdef SQLITE_ENABLE_STAT4 for(i=0; inCol; i++) p->current.anEq[i] = 1; +#endif }else{ /* Second and subsequent calls get processed here */ #ifdef SQLITE_ENABLE_STAT4 @@ -118286,15 +123880,17 @@ static void statPush( /* Update anDLt[], anLt[] and anEq[] to reflect the values that apply ** to the current row of the index. 
*/ +#ifdef SQLITE_ENABLE_STAT4 for(i=0; icurrent.anEq[i]++; } +#endif for(i=iChng; inCol; i++){ p->current.anDLt[i]++; #ifdef SQLITE_ENABLE_STAT4 if( p->mxSample ) p->current.anLt[i] += p->current.anEq[i]; -#endif p->current.anEq[i] = 1; +#endif } } @@ -118428,7 +124024,9 @@ static void statGet( u64 iVal = (p->nRow + nDistinct - 1) / nDistinct; if( iVal==2 && p->nRow*10 <= nDistinct*11 ) iVal = 1; sqlite3_str_appendf(&sStat, " %llu", iVal); +#ifdef SQLITE_ENABLE_STAT4 assert( p->current.anEq[i] ); +#endif } sqlite3ResultStrAccum(context, &sStat); } @@ -118514,6 +124112,7 @@ static void analyzeVdbeCommentIndexWithColumnName( if( NEVER(i==XN_ROWID) ){ VdbeComment((v,"%s.rowid",pIdx->zName)); }else if( i==XN_EXPR ){ + assert( pIdx->bHasExpr ); VdbeComment((v,"%s.expr(%d)",pIdx->zName, k)); }else{ VdbeComment((v,"%s.%s", pIdx->zName, pIdx->pTable->aCol[i].zCnName)); @@ -118554,11 +124153,15 @@ static void analyzeOneTable( int regIdxname = iMem++; /* Register containing index name */ int regStat1 = iMem++; /* Value for the stat column of sqlite_stat1 */ int regPrev = iMem; /* MUST BE LAST (see below) */ +#ifdef SQLITE_ENABLE_STAT4 + int doOnce = 1; /* Flag for a one-time computation */ +#endif #ifdef SQLITE_ENABLE_PREUPDATE_HOOK Table *pStat1 = 0; #endif - pParse->nMem = MAX(pParse->nMem, iMem); + sqlite3TouchRegister(pParse, iMem); + assert( sqlite3NoTempsInRange(pParse, regNewRowid, iMem) ); v = sqlite3GetVdbe(pParse); if( v==0 || NEVER(pTab==0) ){ return; @@ -118664,7 +124267,7 @@ static void analyzeOneTable( ** the regPrev array and a trailing rowid (the rowid slot is required ** when building a record to insert into the sample column of ** the sqlite_stat4 table. */ - pParse->nMem = MAX(pParse->nMem, regPrev+nColTest); + sqlite3TouchRegister(pParse, regPrev+nColTest); /* Open a read-only cursor on the index being analyzed. */ assert( iDb==sqlite3SchemaToIndex(db, pIdx->pSchema) ); @@ -118836,7 +124439,35 @@ static void analyzeOneTable( int addrIsNull; u8 seekOp = HasRowid(pTab) ? OP_NotExists : OP_NotFound; - pParse->nMem = MAX(pParse->nMem, regCol+nCol); + if( doOnce ){ + int mxCol = nCol; + Index *pX; + + /* Compute the maximum number of columns in any index */ + for(pX=pTab->pIndex; pX; pX=pX->pNext){ + int nColX; /* Number of columns in pX */ + if( !HasRowid(pTab) && IsPrimaryKeyIndex(pX) ){ + nColX = pX->nKeyCol; + }else{ + nColX = pX->nColumn; + } + if( nColX>mxCol ) mxCol = nColX; + } + + /* Allocate space to compute results for the largest index */ + sqlite3TouchRegister(pParse, regCol+mxCol); + doOnce = 0; +#ifdef SQLITE_DEBUG + /* Verify that the call to sqlite3ClearTempRegCache() below + ** really is needed. 
+ ** https://sqlite.org/forum/forumpost/83cb4a95a0 (2023-03-25) + */ + testcase( !sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); +#endif + sqlite3ClearTempRegCache(pParse); /* tag-20230325-1 */ + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+mxCol) ); + } + assert( sqlite3NoTempsInRange(pParse, regEq, regCol+nCol) ); addrNext = sqlite3VdbeCurrentAddr(v); callStatGet(pParse, regStat, STAT_GET_ROWID, regSampleRowid); @@ -118917,6 +124548,11 @@ static void analyzeDatabase(Parse *pParse, int iDb){ for(k=sqliteHashFirst(&pSchema->tblHash); k; k=sqliteHashNext(k)){ Table *pTab = (Table*)sqliteHashData(k); analyzeOneTable(pParse, pTab, 0, iStatCur, iMem, iTab); +#ifdef SQLITE_ENABLE_STAT4 + iMem = sqlite3FirstAvailableRegister(pParse, iMem); +#else + assert( iMem==sqlite3FirstAvailableRegister(pParse,iMem) ); +#endif } loadAnalysis(pParse, iDb); } @@ -119079,6 +124715,16 @@ static void decodeIntArray( while( z[0]!=0 && z[0]!=' ' ) z++; while( z[0]==' ' ) z++; } + + /* Set the bLowQual flag if the peak number of rows obtained + ** from a full equality match is so large that a full table scan + ** seems likely to be faster than using the index. + */ + if( aLog[0] > 66 /* Index has more than 100 rows */ + && aLog[0] <= aLog[nOut-1] /* And only a single value seen */ + ){ + pIndex->bLowQual = 1; + } } } @@ -119157,6 +124803,8 @@ static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){ ** and its contents. */ SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ + assert( db!=0 ); + assert( pIdx!=0 ); #ifdef SQLITE_ENABLE_STAT4 if( pIdx->aSample ){ int j; @@ -119166,7 +124814,7 @@ SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){ } sqlite3DbFree(db, pIdx->aSample); } - if( db && db->pnBytesFreed==0 ){ + if( db->pnBytesFreed==0 ){ pIdx->nSample = 0; pIdx->aSample = 0; } @@ -119302,6 +124950,10 @@ static int loadStatTbl( pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); assert( pIdx==0 || pIdx->nSample==0 ); if( pIdx==0 ) continue; + if( pIdx->aSample!=0 ){ + /* The same index appears in sqlite_stat4 under multiple names */ + continue; + } assert( !HasRowid(pIdx->pTable) || pIdx->nColumn==pIdx->nKeyCol+1 ); if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ nIdxCol = pIdx->nKeyCol; @@ -119309,6 +124961,7 @@ static int loadStatTbl( nIdxCol = pIdx->nColumn; } pIdx->nSampleCol = nIdxCol; + pIdx->mxSample = nSample; nByte = sizeof(IndexSample) * nSample; nByte += sizeof(tRowcnt) * nIdxCol * 3 * nSample; nByte += nIdxCol * sizeof(tRowcnt); /* Space for Index.aAvgEq[] */ @@ -119348,6 +125001,11 @@ static int loadStatTbl( if( zIndex==0 ) continue; pIdx = findIndexOrPrimaryKey(db, zIndex, zDb); if( pIdx==0 ) continue; + if( pIdx->nSample>=pIdx->mxSample ){ + /* Too many slots used because the same index appears in + ** sqlite_stat4 using multiple names */ + continue; + } /* This next condition is true if data has already been loaded from ** the sqlite_stat4 table. */ nCol = pIdx->nSampleCol; @@ -119360,14 +125018,15 @@ static int loadStatTbl( decodeIntArray((char*)sqlite3_column_text(pStmt,2),nCol,pSample->anLt,0,0); decodeIntArray((char*)sqlite3_column_text(pStmt,3),nCol,pSample->anDLt,0,0); - /* Take a copy of the sample. Add two 0x00 bytes the end of the buffer. + /* Take a copy of the sample. Add 8 extra 0x00 bytes the end of the buffer. ** This is in case the sample record is corrupted. 
In that case, the ** sqlite3VdbeRecordCompare() may read up to two varints past the ** end of the allocated buffer before it realizes it is dealing with - ** a corrupt record. Adding the two 0x00 bytes prevents this from causing + ** a corrupt record. Or it might try to read a large integer from the + ** buffer. In any case, eight 0x00 bytes prevents this from causing ** a buffer overread. */ pSample->n = sqlite3_column_bytes(pStmt, 4); - pSample->p = sqlite3DbMallocZero(db, pSample->n + 2); + pSample->p = sqlite3DbMallocZero(db, pSample->n + 8); if( pSample->p==0 ){ sqlite3_finalize(pStmt); return SQLITE_NOMEM_BKPT; @@ -119391,11 +125050,12 @@ static int loadStat4(sqlite3 *db, const char *zDb){ const Table *pStat4; assert( db->lookaside.bDisable ); - if( (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 + if( OptimizationEnabled(db, SQLITE_Stat4) + && (pStat4 = sqlite3FindTable(db, "sqlite_stat4", zDb))!=0 && IsOrdinaryTable(pStat4) ){ rc = loadStatTbl(db, - "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx", + "SELECT idx,count(*) FROM %Q.sqlite_stat4 GROUP BY idx COLLATE nocase", "SELECT idx,neq,nlt,ndlt,sample FROM %Q.sqlite_stat4", zDb ); @@ -119585,7 +125245,7 @@ static void attachFunc( char *zErr = 0; unsigned int flags; Db *aNew; /* New array of Db pointers */ - Db *pNew; /* Db object for the newly attached database */ + Db *pNew = 0; /* Db object for the newly attached database */ char *zErrDyn = 0; sqlite3_vfs *pVfs; @@ -119605,13 +125265,26 @@ static void attachFunc( /* This is not a real ATTACH. Instead, this routine is being called ** from sqlite3_deserialize() to close database db->init.iDb and ** reopen it as a MemDB */ + Btree *pNewBt = 0; pVfs = sqlite3_vfs_find("memdb"); if( pVfs==0 ) return; - pNew = &db->aDb[db->init.iDb]; - if( pNew->pBt ) sqlite3BtreeClose(pNew->pBt); - pNew->pBt = 0; - pNew->pSchema = 0; - rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNew->pBt, 0, SQLITE_OPEN_MAIN_DB); + rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNewBt, 0, SQLITE_OPEN_MAIN_DB); + if( rc==SQLITE_OK ){ + Schema *pNewSchema = sqlite3SchemaGet(db, pNewBt); + if( pNewSchema ){ + /* Both the Btree and the new Schema were allocated successfully. + ** Close the old db and update the aDb[] slot with the new memdb + ** values. */ + pNew = &db->aDb[db->init.iDb]; + if( ALWAYS(pNew->pBt) ) sqlite3BtreeClose(pNew->pBt); + pNew->pBt = pNewBt; + pNew->pSchema = pNewSchema; + }else{ + sqlite3BtreeClose(pNewBt); + rc = SQLITE_NOMEM; + } + } + if( rc ) goto attach_error; }else{ /* This is a real ATTACH ** @@ -119761,7 +125434,7 @@ static void attachFunc( } #endif if( rc ){ - if( !REOPEN_AS_MEMDB(db) ){ + if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){ int iDb = db->nDb - 1; assert( iDb>=2 ); if( db->aDb[iDb].pBt ){ @@ -119878,6 +125551,8 @@ static void codeAttach( sqlite3* db = pParse->db; int regArgs; + if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto attach_end; + if( pParse->nErr ) goto attach_end; memset(&sName, 0, sizeof(NameContext)); sName.pParse = pParse; @@ -120346,7 +126021,7 @@ SQLITE_PRIVATE int sqlite3AuthCheck( sqlite3 *db = pParse->db; int rc; - /* Don't do any authorization checks if the database is initialising + /* Don't do any authorization checks if the database is initializing ** or if the parser is being invoked from within sqlite3_declare_vtab. 
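Aside: the attachFunc() hunk above reworks how sqlite3_deserialize() swaps a database slot over to the "memdb" VFS, allocating the new Btree and Schema before closing the old ones. The public-API usage, sketched with error handling elided (assumes a build where the serialization interfaces are available, which is the default in recent releases):

```c
/* Serialize a database into a heap buffer, then load the image back in,
** which internally takes the reopen-as-memdb path patched above. */
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_int64 n = 0;
  unsigned char *pData;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x); INSERT INTO t VALUES (1);", 0, 0, 0);

  /* Copy "main" into a malloc'ed image ... */
  pData = sqlite3_serialize(db, "main", &n, 0);

  /* ... and replace the current contents with that image.  FREEONCLOSE
  ** hands ownership of the buffer to SQLite. */
  sqlite3_deserialize(db, "main", pData, n, n,
                      SQLITE_DESERIALIZE_FREEONCLOSE);
  sqlite3_close(db);
  return 0;
}
```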
*/ assert( !IN_RENAME_OBJECT || db->xAuth==0 ); @@ -120553,6 +126228,7 @@ SQLITE_PRIVATE int sqlite3DbMaskAllZero(yDbMask m){ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ sqlite3 *db; Vdbe *v; + int iDb, i; assert( pParse->pToplevel==0 ); db = pParse->db; @@ -120582,7 +126258,6 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ if( pParse->bReturning ){ Returning *pReturning = pParse->u1.pReturning; int addrRewind; - int i; int reg; if( pReturning->nRetCol ){ @@ -120619,76 +126294,66 @@ SQLITE_PRIVATE void sqlite3FinishCoding(Parse *pParse){ ** transaction on each used database and to verify the schema cookie ** on each used database. */ - if( db->mallocFailed==0 - && (DbMaskNonZero(pParse->cookieMask) || pParse->pConstExpr) - ){ - int iDb, i; - assert( sqlite3VdbeGetOp(v, 0)->opcode==OP_Init ); - sqlite3VdbeJumpHere(v, 0); - assert( db->nDb>0 ); - iDb = 0; - do{ - Schema *pSchema; - if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue; - sqlite3VdbeUsesBtree(v, iDb); - pSchema = db->aDb[iDb].pSchema; - sqlite3VdbeAddOp4Int(v, - OP_Transaction, /* Opcode */ - iDb, /* P1 */ - DbMaskTest(pParse->writeMask,iDb), /* P2 */ - pSchema->schema_cookie, /* P3 */ - pSchema->iGeneration /* P4 */ - ); - if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1); - VdbeComment((v, - "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite)); - }while( ++iDbnDb ); + assert( pParse->nErr>0 || sqlite3VdbeGetOp(v, 0)->opcode==OP_Init ); + sqlite3VdbeJumpHere(v, 0); + assert( db->nDb>0 ); + iDb = 0; + do{ + Schema *pSchema; + if( DbMaskTest(pParse->cookieMask, iDb)==0 ) continue; + sqlite3VdbeUsesBtree(v, iDb); + pSchema = db->aDb[iDb].pSchema; + sqlite3VdbeAddOp4Int(v, + OP_Transaction, /* Opcode */ + iDb, /* P1 */ + DbMaskTest(pParse->writeMask,iDb), /* P2 */ + pSchema->schema_cookie, /* P3 */ + pSchema->iGeneration /* P4 */ + ); + if( db->init.busy==0 ) sqlite3VdbeChangeP5(v, 1); + VdbeComment((v, + "usesStmtJournal=%d", pParse->mayAbort && pParse->isMultiWrite)); + }while( ++iDbnDb ); #ifndef SQLITE_OMIT_VIRTUALTABLE - for(i=0; inVtabLock; i++){ - char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]); - sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); - } - pParse->nVtabLock = 0; + for(i=0; inVtabLock; i++){ + char *vtab = (char *)sqlite3GetVTable(db, pParse->apVtabLock[i]); + sqlite3VdbeAddOp4(v, OP_VBegin, 0, 0, 0, vtab, P4_VTAB); + } + pParse->nVtabLock = 0; #endif - /* Once all the cookies have been verified and transactions opened, - ** obtain the required table-locks. This is a no-op unless the - ** shared-cache feature is enabled. - */ - codeTableLocks(pParse); +#ifndef SQLITE_OMIT_SHARED_CACHE + /* Once all the cookies have been verified and transactions opened, + ** obtain the required table-locks. This is a no-op unless the + ** shared-cache feature is enabled. + */ + if( pParse->nTableLock ) codeTableLocks(pParse); +#endif - /* Initialize any AUTOINCREMENT data structures required. - */ - sqlite3AutoincrementBegin(pParse); + /* Initialize any AUTOINCREMENT data structures required. + */ + if( pParse->pAinc ) sqlite3AutoincrementBegin(pParse); - /* Code constant expressions that where factored out of inner loops. - ** - ** The pConstExpr list might also contain expressions that we simply - ** want to keep around until the Parse object is deleted. Such - ** expressions have iConstExprReg==0. Do not generate code for - ** those expressions, of course. 
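Aside: the sqlite3FinishCoding() hunk above now always rewrites the statement prologue (the OP_Init jump target, one OP_Transaction per used database, factored constants, then a jump back to address 1). The prologue is visible through EXPLAIN; a sketch, noting that the exact opcode listing varies between versions:

```c
/* Dump the VDBE program for a simple statement; OP_Init, OP_Transaction
** and OP_Goto in the listing come from sqlite3FinishCoding(). */
#include <sqlite3.h>
#include <stdio.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x)", 0, 0, 0);
  sqlite3_prepare_v2(db, "EXPLAIN SELECT x FROM t", -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%-4d %s\n", sqlite3_column_int(pStmt, 0),
           (const char*)sqlite3_column_text(pStmt, 1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
```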
- */ - if( pParse->pConstExpr ){ - ExprList *pEL = pParse->pConstExpr; - pParse->okConstFactor = 0; - for(i=0; inExpr; i++){ - int iReg = pEL->a[i].u.iConstExprReg; - if( iReg>0 ){ - sqlite3ExprCode(pParse, pEL->a[i].pExpr, iReg); - } - } + /* Code constant expressions that were factored out of inner loops. + */ + if( pParse->pConstExpr ){ + ExprList *pEL = pParse->pConstExpr; + pParse->okConstFactor = 0; + for(i=0; inExpr; i++){ + assert( pEL->a[i].u.iConstExprReg>0 ); + sqlite3ExprCode(pParse, pEL->a[i].pExpr, pEL->a[i].u.iConstExprReg); } + } - if( pParse->bReturning ){ - Returning *pRet = pParse->u1.pReturning; - if( pRet->nRetCol ){ - sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); - } + if( pParse->bReturning ){ + Returning *pRet = pParse->u1.pReturning; + if( pRet->nRetCol ){ + sqlite3VdbeAddOp2(v, OP_OpenEphemeral, pRet->iRetCur, pRet->nRetCol); } - - /* Finally, jump back to the beginning of the executable code. */ - sqlite3VdbeGoto(v, 1); } + + /* Finally, jump back to the beginning of the executable code. */ + sqlite3VdbeGoto(v, 1); } /* Get the VDBE program ready for execution @@ -120727,6 +126392,7 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){ char saveBuf[PARSE_TAIL_SZ]; if( pParse->nErr ) return; + if( pParse->eParseMode ) return; assert( pParse->nested<10 ); /* Nesting should only be of limited depth */ va_start(ap, zFormat); zSql = sqlite3VMPrintf(db, zFormat, ap); @@ -120873,7 +126539,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( /* If zName is the not the name of a table in the schema created using ** CREATE, then check to see if it is the name of an virtual table that ** can be an eponymous virtual table. */ - if( pParse->disableVtab==0 && db->init.busy==0 ){ + if( (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)==0 && db->init.busy==0 ){ Module *pMod = (Module*)sqlite3HashFind(&db->aModule, zName); if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){ pMod = sqlite3PragmaVtabRegister(db, zName); @@ -120886,7 +126552,7 @@ SQLITE_PRIVATE Table *sqlite3LocateTable( #endif if( flags & LOCATE_NOERR ) return 0; pParse->checkSchema = 1; - }else if( IsVirtual(p) && pParse->disableVtab ){ + }else if( IsVirtual(p) && (pParse->prepFlags & SQLITE_PREPARE_NO_VTAB)!=0 ){ p = 0; } @@ -121142,7 +126808,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetExpr( */ SQLITE_PRIVATE Expr *sqlite3ColumnExpr(Table *pTab, Column *pCol){ if( pCol->iDflt==0 ) return 0; - if( NEVER(!IsOrdinaryTable(pTab)) ) return 0; + if( !IsOrdinaryTable(pTab) ) return 0; if( NEVER(pTab->u.tab.pDfltList==0) ) return 0; if( NEVER(pTab->u.tab.pDfltList->nExpriDflt) ) return 0; return pTab->u.tab.pDfltList->a[pCol->iDflt-1].pExpr; @@ -121174,7 +126840,7 @@ SQLITE_PRIVATE void sqlite3ColumnSetColl( } /* -** Return the collating squence name for a column +** Return the collating sequence name for a column */ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column *pCol){ const char *z; @@ -121195,16 +126861,17 @@ SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3 *db, Table *pTable){ int i; Column *pCol; assert( pTable!=0 ); + assert( db!=0 ); if( (pCol = pTable->aCol)!=0 ){ for(i=0; inCol; i++, pCol++){ assert( pCol->zCnName==0 || pCol->hName==sqlite3StrIHash(pCol->zCnName) ); sqlite3DbFree(db, pCol->zCnName); } - sqlite3DbFree(db, pTable->aCol); + sqlite3DbNNFreeNN(db, pTable->aCol); if( IsOrdinaryTable(pTable) ){ sqlite3ExprListDelete(db, pTable->u.tab.pDfltList); } - if( db==0 || db->pnBytesFreed==0 ){ + if( db->pnBytesFreed==0 ){ pTable->aCol = 0; pTable->nCol = 
0; if( IsOrdinaryTable(pTable) ){ @@ -121241,7 +126908,8 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ ** a Table object that was going to be marked ephemeral. So do not check ** that no lookaside memory is used in this case either. */ int nLookaside = 0; - if( db && !db->mallocFailed && (pTable->tabFlags & TF_Ephemeral)==0 ){ + assert( db!=0 ); + if( !db->mallocFailed && (pTable->tabFlags & TF_Ephemeral)==0 ){ nLookaside = sqlite3LookasideUsed(db, 0); } #endif @@ -121251,7 +126919,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ pNext = pIndex->pNext; assert( pIndex->pSchema==pTable->pSchema || (IsVirtual(pTable) && pIndex->idxType!=SQLITE_IDXTYPE_APPDEF) ); - if( (db==0 || db->pnBytesFreed==0) && !IsVirtual(pTable) ){ + if( db->pnBytesFreed==0 && !IsVirtual(pTable) ){ char *zName = pIndex->zName; TESTONLY ( Index *pOld = ) sqlite3HashInsert( &pIndex->pSchema->idxHash, zName, 0 @@ -121265,7 +126933,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ if( IsOrdinaryTable(pTable) ){ sqlite3FkDelete(db, pTable); } -#ifndef SQLITE_OMIT_VIRTUAL_TABLE +#ifndef SQLITE_OMIT_VIRTUALTABLE else if( IsVirtual(pTable) ){ sqlite3VtabClear(db, pTable); } @@ -121288,10 +126956,14 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){ } SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){ /* Do not delete the table until the reference count reaches zero. */ + assert( db!=0 ); if( !pTable ) return; - if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return; + if( db->pnBytesFreed==0 && (--pTable->nTabRef)>0 ) return; deleteTable(db, pTable); } +SQLITE_PRIVATE void sqlite3DeleteTableGeneric(sqlite3 *db, void *pTable){ + sqlite3DeleteTable(db, (Table*)pTable); +} /* @@ -121826,20 +127498,14 @@ SQLITE_PRIVATE void sqlite3ColumnPropertiesFromName(Table *pTab, Column *pCol){ } #endif -/* -** Name of the special TEMP trigger used to implement RETURNING. The -** name begins with "sqlite_" so that it is guaranteed not to collide -** with any application-generated triggers. -*/ -#define RETURNING_TRIGGER_NAME "sqlite_returning" - /* ** Clean up the data structures associated with the RETURNING clause. 
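Aside: sqlite3DeleteTableGeneric() above, like the void* form of sqlite3DeleteReturning() in the next hunk, wraps a typed destructor behind the single (sqlite3*, void*) signature that the parser's cleanup list stores. A minimal sketch of that adapter pattern with invented names; casting the argument inside a wrapper is well-defined C, unlike casting the function pointer itself, which the older code did:

```c
#include <stdlib.h>

typedef struct Ctx Ctx;                  /* stand-in for sqlite3 */
typedef struct Thing { int x; } Thing;   /* stand-in for Table/Returning */

static void thingDelete(Ctx *pCtx, Thing *p){ (void)pCtx; free(p); }

/* The cleanup list stores exactly one signature; the adapter, not the
** function pointer, performs the cast. */
static void thingDeleteGeneric(Ctx *pCtx, void *pArg){
  thingDelete(pCtx, (Thing*)pArg);
}

int main(void){
  void (*xCleanup)(Ctx*, void*) = thingDeleteGeneric;
  xCleanup(0, malloc(sizeof(Thing)));
  return 0;
}
```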
*/ -static void sqlite3DeleteReturning(sqlite3 *db, Returning *pRet){ +static void sqlite3DeleteReturning(sqlite3 *db, void *pArg){ + Returning *pRet = (Returning*)pArg; Hash *pHash; pHash = &(db->aDb[1].pSchema->trigHash); - sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, 0); + sqlite3HashInsert(pHash, pRet->zName, 0); sqlite3ExprListDelete(db, pRet->pReturnEL); sqlite3DbFree(db, pRet); } @@ -121867,7 +127533,7 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ if( pParse->pNewTrigger ){ sqlite3ErrorMsg(pParse, "cannot use RETURNING in a trigger"); }else{ - assert( pParse->bReturning==0 ); + assert( pParse->bReturning==0 || pParse->ifNotExists ); } pParse->bReturning = 1; pRet = sqlite3DbMallocZero(db, sizeof(*pRet)); @@ -121878,11 +127544,12 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ pParse->u1.pReturning = pRet; pRet->pParse = pParse; pRet->pReturnEL = pList; - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3DeleteReturning, pRet); + sqlite3ParserAddCleanup(pParse, sqlite3DeleteReturning, pRet); testcase( pParse->earlyCleanup ); if( db->mallocFailed ) return; - pRet->retTrig.zName = RETURNING_TRIGGER_NAME; + sqlite3_snprintf(sizeof(pRet->zName), pRet->zName, + "sqlite_returning_%p", pParse); + pRet->retTrig.zName = pRet->zName; pRet->retTrig.op = TK_RETURNING; pRet->retTrig.tr_tm = TRIGGER_AFTER; pRet->retTrig.bReturning = 1; @@ -121893,8 +127560,9 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){ pRet->retTStep.pTrig = &pRet->retTrig; pRet->retTStep.pExprList = pList; pHash = &(db->aDb[1].pSchema->trigHash); - assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0 || pParse->nErr ); - if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig) + assert( sqlite3HashFind(pHash, pRet->zName)==0 + || pParse->nErr || pParse->ifNotExists ); + if( sqlite3HashInsert(pHash, pRet->zName, &pRet->retTrig) ==&pRet->retTrig ){ sqlite3OomFault(db); } @@ -121928,7 +127596,7 @@ SQLITE_PRIVATE void sqlite3AddColumn(Parse *pParse, Token sName, Token sType){ } if( !IN_RENAME_OBJECT ) sqlite3DequoteToken(&sName); - /* Because keywords GENERATE ALWAYS can be converted into indentifiers + /* Because keywords GENERATE ALWAYS can be converted into identifiers ** by the parser, we can sometimes end up with a typename that ends ** with "generated always". Check for this case and omit the surplus ** text. 
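/*
** [Illustrative sketch, not part of the upstream diff] The hunk above
** drops the fixed RETURNING_TRIGGER_NAME and instead derives a unique
** temp-trigger name from the Parse pointer ("sqlite_returning_%p").
** A minimal standalone illustration of that naming idea follows; the
** DemoParse type and demoSetReturningName() are hypothetical.
*/
#include <stdio.h>

typedef struct DemoParse DemoParse;
struct DemoParse { char zName[32]; };

static void demoSetReturningName(DemoParse *p){
  /* Each live object has a distinct address, so the generated name is
  ** unique among all concurrently live parses. */
  snprintf(p->zName, sizeof(p->zName), "sqlite_returning_%p", (void*)p);
}

int main(void){
  DemoParse a, b;
  demoSetReturningName(&a);
  demoSetReturningName(&b);
  printf("%s\n%s\n", a.zName, b.zName);   /* two distinct names */
  return 0;
}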
*/
@@ -122075,7 +127743,8 @@ SQLITE_PRIVATE char sqlite3AffinityType(const char *zIn, Column *pCol){
  assert( zIn!=0 );
  while( zIn[0] ){
-   h = (h<<8) + sqlite3UpperToLower[(*zIn)&0xff];
+   u8 x = *(u8*)zIn;
+   h = (h<<8) + sqlite3UpperToLower[x];
    zIn++;
    if( h==(('c'<<24)+('h'<<16)+('a'<<8)+'r') ){             /* CHAR */
      aff = SQLITE_AFF_TEXT;
@@ -122149,7 +127818,7 @@ SQLITE_PRIVATE void sqlite3AddDefaultValue(
  Parse *pParse,           /* Parsing context */
  Expr *pExpr,             /* The parsed expression of the default value */
  const char *zStart,      /* Start of the default value text */
- const char *zEnd         /* First character past end of defaut value text */
+ const char *zEnd         /* First character past end of default value text */
){
  Table *p;
  Column *pCol;
@@ -122421,6 +128090,14 @@ SQLITE_PRIVATE void sqlite3AddGenerated(Parse *pParse, Expr *pExpr, Token *pType
    if( pCol->colFlags & COLFLAG_PRIMKEY ){
      makeColumnPartOfPrimaryKey(pParse, pCol); /* For the error message */
    }
+   if( ALWAYS(pExpr) && pExpr->op==TK_ID ){
+     /* The value of a generated column needs to be a real expression, not
+     ** just a reference to another column, in order for covering index
+     ** optimizations to work correctly.  So if the value is not an expression,
+     ** turn it into one by adding a unary "+" operator. */
+     pExpr = sqlite3PExpr(pParse, TK_UPLUS, pExpr, 0);
+   }
+   if( pExpr && pExpr->op!=TK_RAISE ) pExpr->affExpr = pCol->affinity;
    sqlite3ColumnSetExpr(pParse, pTab, pCol, pExpr);
    pExpr = 0;
    goto generated_done;
@@ -122489,7 +128166,7 @@ static int identLength(const char *z){
** to the specified offset in the buffer and updates *pIdx to refer
** to the first byte after the last byte written before returning.
**
-** If the string zSignedIdent consists entirely of alpha-numeric
+** If the string zSignedIdent consists entirely of alphanumeric
** characters, does not begin with a digit and is not an SQL keyword,
** then it is copied to the output buffer exactly as it is. Otherwise,
** it is quoted using double-quotes.
@@ -122557,7 +128234,8 @@ static char *createTableStmt(sqlite3 *db, Table *p){
    /* SQLITE_AFF_TEXT    */ " TEXT",
    /* SQLITE_AFF_NUMERIC */ " NUM",
    /* SQLITE_AFF_INTEGER */ " INT",
-   /* SQLITE_AFF_REAL    */ " REAL"
+   /* SQLITE_AFF_REAL    */ " REAL",
+   /* SQLITE_AFF_FLEXNUM */ " NUM",
  };
  int len;
  const char *zType;
@@ -122573,10 +128251,12 @@
      testcase( pCol->affinity==SQLITE_AFF_NUMERIC );
      testcase( pCol->affinity==SQLITE_AFF_INTEGER );
      testcase( pCol->affinity==SQLITE_AFF_REAL );
+     testcase( pCol->affinity==SQLITE_AFF_FLEXNUM );
      zType = azType[pCol->affinity - SQLITE_AFF_BLOB];
      len = sqlite3Strlen30(zType);
      assert( pCol->affinity==SQLITE_AFF_BLOB
+           || pCol->affinity==SQLITE_AFF_FLEXNUM
            || pCol->affinity==sqlite3AffinityType(zType, 0) );
      memcpy(&zStmt[k], zType, len);
      k += len;
@@ -122638,7 +128318,7 @@ static void estimateIndexWidth(Index *pIdx){
  for(i=0; i<pIdx->nColumn; i++){
    i16 x = pIdx->aiColumn[i];
    assert( x<pIdx->pTable->nCol );
-   wIndex += x<0 ? 1 : aCol[pIdx->aiColumn[i]].szEst;
+   wIndex += x<0 ? 1 : aCol[x].szEst;
  }
  pIdx->szIdxRow = sqlite3LogEst(wIndex*4);
}
@@ -122693,7 +128373,8 @@ static int isDupColumn(Index *pIdx, int nKey, Index *pPk, int iCol){
/* Recompute the colNotIdxed field of the Index.
**
** colNotIdxed is a bitmask that has a 0 bit representing each indexed
-** columns that are within the first 63 columns of the table.  The
+** columns that are within the first 63 columns of the table and a 1 for
+** all other bits (all columns that are not in the index).
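/*
** [Illustrative sketch, not part of the upstream diff] The comment
** above describes the colNotIdxed bitmask. A standalone model of that
** computation, assuming only that aiColumn[] lists the indexed column
** numbers (negative values meaning rowid/expression):
*/
#include <stdio.h>
#include <stdint.h>

static uint64_t demoColNotIdxed(const int *aiColumn, int nColumn){
  uint64_t m = 0;     /* bit set for each indexed column below 63 */
  int i;
  for(i=0; i<nColumn; i++){
    int x = aiColumn[i];
    if( x>=0 && x<63 ) m |= ((uint64_t)1)<<x;
  }
  /* Invert: 0 = indexed, 1 = not indexed.  Bit 63 is never set in m,
  ** so after inversion the high-order bit is always 1. */
  return ~m;
}

int main(void){
  int aiColumn[] = { 0, 2, 5 };
  uint64_t mask = demoColNotIdxed(aiColumn, 3);
  printf("colNotIdxed = 0x%016llx\n", (unsigned long long)mask);
  printf("high bit    = %d\n", (int)(mask>>63));   /* always 1 */
  return 0;
}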
The ** high-order bit of colNotIdxed is always 1. All unindexed columns ** of the table have a 1. ** @@ -122721,7 +128402,7 @@ static void recomputeColumnsNotIndexed(Index *pIdx){ } } pIdx->colNotIdxed = ~m; - assert( (pIdx->colNotIdxed>>63)==1 ); + assert( (pIdx->colNotIdxed>>63)==1 ); /* See note-20221022-a */ } /* @@ -122990,6 +128671,7 @@ SQLITE_PRIVATE int sqlite3ShadowTableName(sqlite3 *db, const char *zName){ ** not pass them into code generator routines by mistake. */ static int markImmutableExprStep(Walker *pWalker, Expr *pExpr){ + (void)pWalker; ExprSetVVAProperty(pExpr, EP_Immutable); return WRC_Continue; } @@ -123325,6 +129007,17 @@ SQLITE_PRIVATE void sqlite3EndTable( /* Reparse everything to update our internal data structures */ sqlite3VdbeAddParseSchemaOp(v, iDb, sqlite3MPrintf(db, "tbl_name='%q' AND type!='trigger'", p->zName),0); + + /* Test for cycles in generated columns and illegal expressions + ** in CHECK constraints and in DEFAULT clauses. */ + if( p->tabFlags & TF_HasGenerated ){ + sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3MPrintf(db, "SELECT*FROM\"%w\".\"%w\"", + db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); + } + sqlite3VdbeAddOp4(v, OP_SqlExec, 1, 0, 0, + sqlite3MPrintf(db, "PRAGMA \"%w\".integrity_check(%Q)", + db->aDb[iDb].zDbSName, p->zName), P4_DYNAMIC); } /* Add the table to the in-memory representation of the database. @@ -123401,9 +129094,12 @@ SQLITE_PRIVATE void sqlite3CreateView( ** on a view, even though views do not have rowids. The following flag ** setting fixes this problem. But the fix can be disabled by compiling ** with -DSQLITE_ALLOW_ROWID_IN_VIEW in case there are legacy apps that - ** depend upon the old buggy behavior. */ -#ifndef SQLITE_ALLOW_ROWID_IN_VIEW - p->tabFlags |= TF_NoVisibleRowid; + ** depend upon the old buggy behavior. The ability can also be toggled + ** using sqlite3_config(SQLITE_CONFIG_ROWID_IN_VIEW,...) */ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + p->tabFlags |= sqlite3Config.mNoVisibleRowid; /* Optional. Allow by default */ +#else + p->tabFlags |= TF_NoVisibleRowid; /* Never allow rowid in view */ #endif sqlite3TwoPartName(pParse, pName1, pName2, &pName); @@ -123462,7 +129158,7 @@ SQLITE_PRIVATE void sqlite3CreateView( ** the columns of the view in the pTable structure. Return the number ** of errors. If an error is seen leave an error message in pParse->zErrMsg. */ -SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ +static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){ Table *pSelTab; /* A fake table from which we get the result set */ Select *pSel; /* Copy of the SELECT that implements the view */ int nErr = 0; /* Number of errors encountered */ @@ -123487,9 +129183,10 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){ #ifndef SQLITE_OMIT_VIEW /* A positive nCol means the columns names for this view are - ** already known. + ** already known. This routine is not called unless either the + ** table is virtual or nCol is zero. */ - if( pTable->nCol>0 ) return 0; + assert( pTable->nCol<=0 ); /* A negative nCol is a special marker meaning that we are currently ** trying to compute the column names. 
If we enter this routine with
@@ -123555,8 +129252,7 @@ SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
      && pTable->nCol==pSel->pEList->nExpr
    ){
      assert( db->mallocFailed==0 );
-     sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel,
-                                            SQLITE_AFF_NONE);
+     sqlite3SubqueryColumnTypes(pParse, pTable, pSel, SQLITE_AFF_NONE);
    }
  }else{
    /* CREATE VIEW name AS...  without an argument list.  Construct
@@ -123585,6 +129281,11 @@
#endif /* SQLITE_OMIT_VIEW */
  return nErr;
}
+SQLITE_PRIVATE int sqlite3ViewGetColumnNames(Parse *pParse, Table *pTable){
+  assert( pTable!=0 );
+  if( !IsVirtual(pTable) && pTable->nCol>0 ) return 0;
+  return viewGetColumnNames(pParse, pTable);
+}
#endif /* !defined(SQLITE_OMIT_VIEW) || !defined(SQLITE_OMIT_VIRTUALTABLE) */

#ifndef SQLITE_OMIT_VIEW
@@ -124369,7 +130070,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
#ifndef SQLITE_OMIT_TEMPDB
  /* If the index name was unqualified, check if the table
  ** is a temp table. If so, set the database to 1. Do not do this
- ** if initialising a database schema.
+ ** if initializing a database schema.
  */
  if( !db->init.busy ){
    pTab = sqlite3SrcListLookup(pParse, pTblName);
@@ -124450,7 +130151,7 @@
  }
  if( !IN_RENAME_OBJECT ){
    if( !db->init.busy ){
-     if( sqlite3FindTable(db, zName, 0)!=0 ){
+     if( sqlite3FindTable(db, zName, pDb->zDbSName)!=0 ){
        sqlite3ErrorMsg(pParse, "there is already a table named %s", zName);
        goto exit_create_index;
      }
@@ -124603,6 +130304,7 @@
      j = XN_EXPR;
      pIndex->aiColumn[i] = XN_EXPR;
      pIndex->uniqNotNull = 0;
+     pIndex->bHasExpr = 1;
    }else{
      j = pCExpr->iColumn;
      assert( j<=0x7fff );
@@ -124614,6 +130316,7 @@
      }
      if( pTab->aCol[j].colFlags & COLFLAG_VIRTUAL ){
        pIndex->bHasVCol = 1;
+       pIndex->bHasExpr = 1;
      }
    }
    pIndex->aiColumn[i] = (i16)j;
@@ -125103,12 +130806,13 @@ SQLITE_PRIVATE IdList *sqlite3IdListAppend(Parse *pParse, IdList *pList, Token *
*/
SQLITE_PRIVATE void sqlite3IdListDelete(sqlite3 *db, IdList *pList){
  int i;
+ assert( db!=0 );
  if( pList==0 ) return;
  assert( pList->eU4!=EU4_EXPR ); /* EU4_EXPR mode is not currently used */
  for(i=0; i<pList->nId; i++){
    sqlite3DbFree(db, pList->a[i].zName);
  }
- sqlite3DbFreeNN(db, pList);
+ sqlite3DbNNFreeNN(db, pList);
}

/*
@@ -125311,11 +131015,12 @@ SQLITE_PRIVATE void sqlite3SrcListAssignCursors(Parse *pParse, SrcList *pList){
SQLITE_PRIVATE void sqlite3SrcListDelete(sqlite3 *db, SrcList *pList){
  int i;
  SrcItem *pItem;
+ assert( db!=0 );
  if( pList==0 ) return;
  for(pItem=pList->a, i=0; i<pList->nSrc; i++, pItem++){
-   if( pItem->zDatabase ) sqlite3DbFreeNN(db, pItem->zDatabase);
-   sqlite3DbFree(db, pItem->zName);
-   if( pItem->zAlias ) sqlite3DbFreeNN(db, pItem->zAlias);
+   if( pItem->zDatabase ) sqlite3DbNNFreeNN(db, pItem->zDatabase);
+   if( pItem->zName ) sqlite3DbNNFreeNN(db, pItem->zName);
+   if( pItem->zAlias ) sqlite3DbNNFreeNN(db, pItem->zAlias);
    if( pItem->fg.isIndexedBy ) sqlite3DbFree(db, pItem->u1.zIndexedBy);
    if( pItem->fg.isTabFunc ) sqlite3ExprListDelete(db, pItem->u1.pFuncArg);
    sqlite3DeleteTable(db, pItem->pTab);
@@ -125326,7 +131031,7 @@
      sqlite3ExprDelete(db, pItem->u3.pOn);
    }
  }
- sqlite3DbFreeNN(db, pList);
+ sqlite3DbNNFreeNN(db, pList);
}

/*
@@ -125907,7 +131612,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){
  if( iDb<0 ) return;
  z =
sqlite3NameFromToken(db, pObjName); if( z==0 ) return; - zDb = db->aDb[iDb].zDbSName; + zDb = pName2->n ? db->aDb[iDb].zDbSName : 0; pTab = sqlite3FindTable(db, z, zDb); if( pTab ){ reindexTable(pParse, pTab, 0); @@ -125917,6 +131622,7 @@ SQLITE_PRIVATE void sqlite3Reindex(Parse *pParse, Token *pName1, Token *pName2){ pIndex = sqlite3FindIndex(db, z, zDb); sqlite3DbFree(db, z); if( pIndex ){ + iDb = sqlite3SchemaToIndex(db, pIndex->pTable->pSchema); sqlite3BeginWriteOperation(pParse, 0, iDb); sqlite3RefillIndex(pParse, pIndex, -1); return; @@ -126022,7 +131728,7 @@ SQLITE_PRIVATE void sqlite3CteDelete(sqlite3 *db, Cte *pCte){ /* ** This routine is invoked once per CTE by the parser while parsing a -** WITH clause. The CTE described by teh third argument is added to +** WITH clause. The CTE described by the third argument is added to ** the WITH clause of the second argument. If the second argument is ** NULL, then a new WITH argument is created. */ @@ -126082,6 +131788,9 @@ SQLITE_PRIVATE void sqlite3WithDelete(sqlite3 *db, With *pWith){ sqlite3DbFree(db, pWith); } } +SQLITE_PRIVATE void sqlite3WithDeleteGeneric(sqlite3 *db, void *pWith){ + sqlite3WithDelete(db, (With*)pWith); +} #endif /* !defined(SQLITE_OMIT_CTE) */ /************** End of build.c ***********************************************/ @@ -126273,6 +131982,7 @@ SQLITE_PRIVATE void sqlite3SetTextEncoding(sqlite3 *db, u8 enc){ ** strings is BINARY. */ db->pDfltColl = sqlite3FindCollSeq(db, enc, sqlite3StrBINARY, 0); + sqlite3ExpirePreparedStatements(db, 1); } /* @@ -126578,19 +132288,21 @@ SQLITE_PRIVATE void sqlite3SchemaClear(void *p){ Hash temp2; HashElem *pElem; Schema *pSchema = (Schema *)p; + sqlite3 xdb; + memset(&xdb, 0, sizeof(xdb)); temp1 = pSchema->tblHash; temp2 = pSchema->trigHash; sqlite3HashInit(&pSchema->trigHash); sqlite3HashClear(&pSchema->idxHash); for(pElem=sqliteHashFirst(&temp2); pElem; pElem=sqliteHashNext(pElem)){ - sqlite3DeleteTrigger(0, (Trigger*)sqliteHashData(pElem)); + sqlite3DeleteTrigger(&xdb, (Trigger*)sqliteHashData(pElem)); } sqlite3HashClear(&temp2); sqlite3HashInit(&pSchema->tblHash); for(pElem=sqliteHashFirst(&temp1); pElem; pElem=sqliteHashNext(pElem)){ Table *pTab = sqliteHashData(pElem); - sqlite3DeleteTable(0, pTab); + sqlite3DeleteTable(&xdb, pTab); } sqlite3HashClear(&temp1); sqlite3HashClear(&pSchema->fkeyHash); @@ -126661,8 +132373,9 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){ Table *pTab; assert( pItem && pSrc->nSrc>=1 ); pTab = sqlite3LocateTableItem(pParse, 0, pItem); - sqlite3DeleteTable(pParse->db, pItem->pTab); + if( pItem->pTab ) sqlite3DeleteTable(pParse->db, pItem->pTab); pItem->pTab = pTab; + pItem->fg.notCte = 1; if( pTab ){ pTab->nTabRef++; if( pItem->fg.isIndexedBy && sqlite3IndexedByLookup(pParse, pItem) ){ @@ -126689,18 +132402,42 @@ SQLITE_PRIVATE void sqlite3CodeChangeCount(Vdbe *v, int regCounter, const char * ** 1) It is a virtual table and no implementation of the xUpdate method ** has been provided ** -** 2) It is a system table (i.e. sqlite_schema), this call is not +** 2) A trigger is currently being coded and the table is a virtual table +** that is SQLITE_VTAB_DIRECTONLY or if PRAGMA trusted_schema=OFF and +** the table is not SQLITE_VTAB_INNOCUOUS. +** +** 3) It is a system table (i.e. 
sqlite_schema), this call is not ** part of a nested parse and writable_schema pragma has not ** been specified ** -** 3) The table is a shadow table, the database connection is in +** 4) The table is a shadow table, the database connection is in ** defensive mode, and the current sqlite3_prepare() ** is for a top-level SQL statement. */ +static int vtabIsReadOnly(Parse *pParse, Table *pTab){ + if( sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0 ){ + return 1; + } + + /* Within triggers: + ** * Do not allow DELETE, INSERT, or UPDATE of SQLITE_VTAB_DIRECTONLY + ** virtual tables + ** * Only allow DELETE, INSERT, or UPDATE of non-SQLITE_VTAB_INNOCUOUS + ** virtual tables if PRAGMA trusted_schema=ON. + */ + if( pParse->pToplevel!=0 + && pTab->u.vtab.p->eVtabRisk > + ((pParse->db->flags & SQLITE_TrustedSchema)!=0) + ){ + sqlite3ErrorMsg(pParse, "unsafe use of virtual table \"%s\"", + pTab->zName); + } + return 0; +} static int tabIsReadOnly(Parse *pParse, Table *pTab){ sqlite3 *db; if( IsVirtual(pTab) ){ - return sqlite3GetVTable(pParse->db, pTab)->pMod->pModule->xUpdate==0; + return vtabIsReadOnly(pParse, pTab); } if( (pTab->tabFlags & (TF_Readonly|TF_Shadow))==0 ) return 0; db = pParse->db; @@ -126712,17 +132449,21 @@ static int tabIsReadOnly(Parse *pParse, Table *pTab){ } /* -** Check to make sure the given table is writable. If it is not -** writable, generate an error message and return 1. If it is -** writable return 0; +** Check to make sure the given table is writable. +** +** If pTab is not writable -> generate an error message and return 1. +** If pTab is writable but other errors have occurred -> return 1. +** If pTab is writable and no prior errors -> return 0; */ -SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, int viewOk){ +SQLITE_PRIVATE int sqlite3IsReadOnly(Parse *pParse, Table *pTab, Trigger *pTrigger){ if( tabIsReadOnly(pParse, pTab) ){ sqlite3ErrorMsg(pParse, "table %s may not be modified", pTab->zName); return 1; } #ifndef SQLITE_OMIT_VIEW - if( !viewOk && IsView(pTab) ){ + if( IsView(pTab) + && (pTrigger==0 || (pTrigger->bReturning && pTrigger->pNext==0)) + ){ sqlite3ErrorMsg(pParse,"cannot modify %s because it is a view",pTab->zName); return 1; } @@ -126787,7 +132528,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere( sqlite3 *db = pParse->db; Expr *pLhs = NULL; /* LHS of IN(SELECT...) operator */ Expr *pInClause = NULL; /* WHERE rowid IN ( select ) */ - ExprList *pEList = NULL; /* Expression list contaning only pSelectRowid */ + ExprList *pEList = NULL; /* Expression list containing only pSelectRowid*/ SrcList *pSelectSrc = NULL; /* SELECT rowid FROM x ... 
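/*
** [Illustrative sketch, not part of the upstream diff] vtabIsReadOnly()
** above encodes the trigger-safety rule in a single comparison.  With
** the risk ordering assumed here (0 = innocuous, 1 = normal,
** 2 = direct-only), "risk > trusted" reproduces the rule table:
*/
#include <stdio.h>

static int demoVtabWriteBlocked(int eVtabRisk, int bTrustedSchema){
  /* blocked when the risk level exceeds what schema trust permits */
  return eVtabRisk > (bTrustedSchema!=0);
}

int main(void){
  int risk, trusted;
  for(trusted=0; trusted<=1; trusted++){
    for(risk=0; risk<=2; risk++){
      printf("trusted=%d risk=%d -> %s\n", trusted, risk,
             demoVtabWriteBlocked(risk, trusted) ? "blocked" : "ok");
    }
  }
  return 0;
}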
(dup of pSrc) */
  Select *pSelect = NULL;     /* Complete SELECT tree */
  Table *pTab;
@@ -126825,14 +132566,20 @@
    );
  }else{
    Index *pPk = sqlite3PrimaryKeyIndex(pTab);
+   assert( pPk!=0 );
+   assert( pPk->nKeyCol>=1 );
    if( pPk->nKeyCol==1 ){
-     const char *zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
+     const char *zName;
+     assert( pPk->aiColumn[0]>=0 && pPk->aiColumn[0]<pTab->nCol );
+     zName = pTab->aCol[pPk->aiColumn[0]].zCnName;
      pLhs = sqlite3Expr(db, TK_ID, zName);
      pEList = sqlite3ExprListAppend(pParse, 0, sqlite3Expr(db, TK_ID, zName));
    }else{
      int i;
      for(i=0; i<pPk->nKeyCol; i++){
-       Expr *p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
+       Expr *p;
+       assert( pPk->aiColumn[i]>=0 && pPk->aiColumn[i]<pTab->nCol );
+       p = sqlite3Expr(db, TK_ID, pTab->aCol[pPk->aiColumn[i]].zCnName);
        pEList = sqlite3ExprListAppend(pParse, pEList, p);
      }
      pLhs = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
@@ -126861,7 +132608,7 @@
      pOrderBy,0,pLimit
  );

- /* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */
+ /* now generate the new WHERE rowid IN clause for the DELETE/UPDATE */
  pInClause = sqlite3PExpr(pParse, TK_IN, pLhs, 0);
  sqlite3PExprAddSelect(pParse, pInClause, pSelect);
  return pInClause;
@@ -126976,7 +132723,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
    goto delete_from_cleanup;
  }

- if( sqlite3IsReadOnly(pParse, pTab, (pTrigger?1:0)) ){
+ if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){
    goto delete_from_cleanup;
  }
  iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
@@ -127075,21 +132822,22 @@
    }
    for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
      assert( pIdx->pSchema==pTab->pSchema );
-     sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb);
      if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
-       sqlite3VdbeChangeP3(v, -1, memCnt ? memCnt : -1);
+       sqlite3VdbeAddOp3(v, OP_Clear, pIdx->tnum, iDb, memCnt ? memCnt : -1);
+     }else{
+       sqlite3VdbeAddOp2(v, OP_Clear, pIdx->tnum, iDb);
      }
    }
  }else
#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */
  {
    u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK;
-   if( sNC.ncFlags & NC_VarSelect ) bComplex = 1;
+   if( sNC.ncFlags & NC_Subquery ) bComplex = 1;
    wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW);
    if( HasRowid(pTab) ){
      /* For a rowid table, initialize the RowSet to an empty set */
      pPk = 0;
-     nPk = 1;
+     assert( nPk==1 );
      iRowSet = ++pParse->nMem;
      sqlite3VdbeAddOp2(v, OP_Null, 0, iRowSet);
    }else{
@@ -127117,7 +132865,8 @@
    if( pWInfo==0 ) goto delete_from_cleanup;
    eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
    assert( IsVirtual(pTab)==0 || eOnePass!=ONEPASS_MULTI );
-   assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF );
+   assert( IsVirtual(pTab) || bComplex || eOnePass!=ONEPASS_OFF
+       || OptimizationDisabled(db, SQLITE_OnePass) );
    if( eOnePass!=ONEPASS_SINGLE ) sqlite3MultiWrite(pParse);
    if( sqlite3WhereUsesDeferredSeek(pWInfo) ){
      sqlite3VdbeAddOp1(v, OP_FinishSeek, iTabCur);
@@ -127277,7 +133026,7 @@
  sqlite3ExprListDelete(db, pOrderBy);
  sqlite3ExprDelete(db, pLimit);
#endif
- sqlite3DbFree(db, aToOpen);
+ if( aToOpen ) sqlite3DbNNFreeNN(db, aToOpen);
  return;
}
/* Make sure "isView" and other macros defined above are undefined. Otherwise
@@ -127454,9 +133203,11 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
  sqlite3FkActions(pParse, pTab, 0, iOld, 0, 0);

  /* Invoke AFTER DELETE trigger programs.
*/ - sqlite3CodeRowTrigger(pParse, pTrigger, - TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel - ); + if( pTrigger ){ + sqlite3CodeRowTrigger(pParse, pTrigger, + TK_DELETE, 0, TRIGGER_AFTER, pTab, iOld, onconf, iLabel + ); + } /* Jump here if the row had already been deleted before any BEFORE ** trigger programs were invoked. Or if a trigger program throws a @@ -127769,6 +133520,42 @@ static void lengthFunc( } } +/* +** Implementation of the octet_length() function +*/ +static void bytelengthFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + assert( argc==1 ); + UNUSED_PARAMETER(argc); + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_BLOB: { + sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); + break; + } + case SQLITE_INTEGER: + case SQLITE_FLOAT: { + i64 m = sqlite3_context_db_handle(context)->enc<=SQLITE_UTF8 ? 1 : 2; + sqlite3_result_int64(context, sqlite3_value_bytes(argv[0])*m); + break; + } + case SQLITE_TEXT: { + if( sqlite3_value_encoding(argv[0])<=SQLITE_UTF8 ){ + sqlite3_result_int(context, sqlite3_value_bytes(argv[0])); + }else{ + sqlite3_result_int(context, sqlite3_value_bytes16(argv[0])); + } + break; + } + default: { + sqlite3_result_null(context); + break; + } + } +} + /* ** Implementation of the abs() function. ** @@ -128045,7 +133832,7 @@ static void roundFunc(sqlite3_context *context, int argc, sqlite3_value **argv){ }else if( n==0 ){ r = (double)((sqlite_int64)(r+(r<0?-0.5:+0.5))); }else{ - zBuf = sqlite3_mprintf("%.*f",n,r); + zBuf = sqlite3_mprintf("%!.*f",n,r); if( zBuf==0 ){ sqlite3_result_error_nomem(context); return; @@ -128245,7 +134032,7 @@ struct compareInfo { /* ** For LIKE and GLOB matching on EBCDIC machines, assume that every -** character is exactly one byte in size. Also, provde the Utf8Read() +** character is exactly one byte in size. Also, provide the Utf8Read() ** macro for fast reading of the next character in the common case where ** the next character is ASCII. */ @@ -128360,7 +134147,7 @@ static int patternCompare( ** c but in the other case and search the input string for either ** c or cx. */ - if( c<=0x80 ){ + if( c<0x80 ){ char zStop[3]; int bMatch; if( noCase ){ @@ -128443,7 +134230,13 @@ static int patternCompare( ** non-zero if there is no match. */ SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){ - return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); + if( zString==0 ){ + return zGlobPattern!=0; + }else if( zGlobPattern==0 ){ + return 1; + }else { + return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '['); + } } /* @@ -128451,7 +134244,13 @@ SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){ ** a miss - like strcmp(). */ SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){ - return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); + if( zStr==0 ){ + return zPattern!=0; + }else if( zPattern==0 ){ + return 1; + }else{ + return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc); + } } /* @@ -128466,7 +134265,7 @@ SQLITE_API int sqlite3_like_count = 0; /* ** Implementation of the like() SQL function. This function implements -** the build-in LIKE operator. The first argument to the function is the +** the built-in LIKE operator. The first argument to the function is the ** pattern and the second argument is the string. 
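/*
** [Illustrative sketch, not part of the upstream diff] octet_length()
** above counts bytes, not characters, and the byte count depends on the
** encoding of the value.  A standalone demo, assuming well-formed UTF-8
** input:
*/
#include <stdio.h>
#include <string.h>

static size_t demoUtf16Bytes(const unsigned char *z){
  size_t n = 0;
  while( *z ){
    if( *z<0x80 ){ z += 1; n += 2; }       /* ASCII: 2 bytes in UTF-16 */
    else if( *z<0xE0 ){ z += 2; n += 2; }  /* 2-byte UTF-8, still BMP */
    else if( *z<0xF0 ){ z += 3; n += 2; }  /* 3-byte UTF-8, still BMP */
    else { z += 4; n += 4; }               /* supplementary: surrogate pair */
  }
  return n;
}

int main(void){
  const char *z = "abc\xC3\xA9";   /* 'a','b','c',U+00E9 -> 5 UTF-8 bytes */
  printf("utf8 octets  = %zu\n", strlen(z));                              /* 5 */
  printf("utf16 octets = %zu\n", demoUtf16Bytes((const unsigned char*)z));/* 8 */
  return 0;
}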
So, the SQL statements: ** ** A LIKE B @@ -128673,13 +134472,13 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ double r1, r2; const char *zVal; r1 = sqlite3_value_double(pValue); - sqlite3_str_appendf(pStr, "%!.15g", r1); + sqlite3_str_appendf(pStr, "%!0.15g", r1); zVal = sqlite3_str_value(pStr); if( zVal ){ sqlite3AtoF(zVal, &r2, pStr->nChar, SQLITE_UTF8); if( r1!=r2 ){ sqlite3_str_reset(pStr); - sqlite3_str_appendf(pStr, "%!.20e", r1); + sqlite3_str_appendf(pStr, "%!0.20e", r1); } } break; @@ -128690,7 +134489,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){ } case SQLITE_BLOB: { char const *zBlob = sqlite3_value_blob(pValue); - int nBlob = sqlite3_value_bytes(pValue); + i64 nBlob = sqlite3_value_bytes(pValue); assert( zBlob==sqlite3_value_blob(pValue) ); /* No encoding change */ sqlite3StrAccumEnlarge(pStr, nBlob*2 + 4); if( pStr->accError==0 ){ @@ -128799,6 +134598,7 @@ static void charFunc( *zOut++ = 0x80 + (u8)(c & 0x3F); } \ } + *zOut = 0; sqlite3_result_text64(context, (char*)z, zOut-z, sqlite3_free, SQLITE_UTF8); } @@ -128827,10 +134627,101 @@ static void hexFunc( *(z++) = hexdigits[c&0xf]; } *z = 0; - sqlite3_result_text(context, zHex, n*2, sqlite3_free); + sqlite3_result_text64(context, zHex, (u64)(z-zHex), + sqlite3_free, SQLITE_UTF8); + } +} + +/* +** Buffer zStr contains nStr bytes of utf-8 encoded text. Return 1 if zStr +** contains character ch, or 0 if it does not. +*/ +static int strContainsChar(const u8 *zStr, int nStr, u32 ch){ + const u8 *zEnd = &zStr[nStr]; + const u8 *z = zStr; + while( z0 ){ + const char *v = (const char*)sqlite3_value_text(argv[i]); + if( v!=0 ){ + if( j>0 && nSep>0 ){ + memcpy(&z[j], zSep, nSep); + j += nSep; + } + memcpy(&z[j], v, k); + j += k; + } + } + } + z[j] = 0; + assert( j<=n ); + sqlite3_result_text64(context, z, j, sqlite3_free, SQLITE_UTF8); +} + +/* +** The CONCAT(...) function. Generate a string result that is the +** concatentation of all non-null arguments. +*/ +static void concatFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + concatFuncCore(context, argc, argv, 0, ""); +} + +/* +** The CONCAT_WS(separator, ...) function. +** +** Generate a string that is the concatenation of 2nd through the Nth +** argument. Use the first argument (which must be non-NULL) as the +** separator. +*/ +static void concatwsFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + int nSep = sqlite3_value_bytes(argv[0]); + const char *zSep = (const char*)sqlite3_value_text(argv[0]); + if( zSep==0 ) return; + concatFuncCore(context, argc-1, argv+1, nSep, zSep); +} + #ifdef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION /* ** The "unknown" function is automatically substituted in place of ** any unrecognized function name when doing an EXPLAIN or EXPLAIN QUERY PLAN -** when the SQLITE_ENABLE_UNKNOWN_FUNCTION compile-time option is used. +** when the SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION compile-time option is used. 
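/*
** [Illustrative sketch, not part of the upstream diff] The hunks above
** make sqlite3_strglob()/sqlite3_strlike() tolerate NULL arguments:
** NULL matches only NULL; anything else against NULL is a miss.  The
** demoPatternCompare() placeholder stands in for the real matcher.
*/
#include <stdio.h>
#include <string.h>

static int demoPatternCompare(const char *zPat, const char *zStr){
  return strcmp(zPat, zStr)!=0;   /* placeholder: exact match only */
}

static int demoStrGlob(const char *zPat, const char *zStr){
  if( zStr==0 ){
    return zPat!=0;       /* 0 (match) only when both are NULL */
  }else if( zPat==0 ){
    return 1;             /* a NULL pattern never matches a real string */
  }else{
    return demoPatternCompare(zPat, zStr);
  }
}

int main(void){
  printf("%d\n", demoStrGlob(0, 0));       /* 0: match */
  printf("%d\n", demoStrGlob("x", 0));     /* 1: miss  */
  printf("%d\n", demoStrGlob(0, "x"));     /* 1: miss  */
  printf("%d\n", demoStrGlob("x", "x"));   /* 0: match */
  return 0;
}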
** When the "sqlite3" command-line shell is built using this functionality, ** that allows an EXPLAIN or EXPLAIN QUERY PLAN for complex queries ** involving application-defined functions to be examined in a generic @@ -129048,6 +135014,9 @@ static void unknownFunc( sqlite3_value **argv ){ /* no-op */ + (void)context; + (void)argc; + (void)argv; } #endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/ @@ -129149,13 +135118,68 @@ static void loadExt(sqlite3_context *context, int argc, sqlite3_value **argv){ */ typedef struct SumCtx SumCtx; struct SumCtx { - double rSum; /* Floating point sum */ - i64 iSum; /* Integer sum */ + double rSum; /* Running sum as as a double */ + double rErr; /* Error term for Kahan-Babushka-Neumaier summation */ + i64 iSum; /* Running sum as a signed integer */ i64 cnt; /* Number of elements summed */ - u8 overflow; /* True if integer overflow seen */ - u8 approx; /* True if non-integer value was input to the sum */ + u8 approx; /* True if any non-integer value was input to the sum */ + u8 ovrfl; /* Integer overflow seen */ }; +/* +** Do one step of the Kahan-Babushka-Neumaier summation. +** +** https://en.wikipedia.org/wiki/Kahan_summation_algorithm +** +** Variables are marked "volatile" to defeat c89 x86 floating point +** optimizations can mess up this algorithm. +*/ +static void kahanBabuskaNeumaierStep( + volatile SumCtx *pSum, + volatile double r +){ + volatile double s = pSum->rSum; + volatile double t = s + r; + if( fabs(s) > fabs(r) ){ + pSum->rErr += (s - t) + r; + }else{ + pSum->rErr += (r - t) + s; + } + pSum->rSum = t; +} + +/* +** Add a (possibly large) integer to the running sum. +*/ +static void kahanBabuskaNeumaierStepInt64(volatile SumCtx *pSum, i64 iVal){ + if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){ + i64 iBig, iSm; + iSm = iVal % 16384; + iBig = iVal - iSm; + kahanBabuskaNeumaierStep(pSum, iBig); + kahanBabuskaNeumaierStep(pSum, iSm); + }else{ + kahanBabuskaNeumaierStep(pSum, (double)iVal); + } +} + +/* +** Initialize the Kahan-Babaska-Neumaier sum from a 64-bit integer +*/ +static void kahanBabuskaNeumaierInit( + volatile SumCtx *p, + i64 iVal +){ + if( iVal<=-4503599627370496LL || iVal>=+4503599627370496LL ){ + i64 iSm = iVal % 16384; + p->rSum = (double)(iVal - iSm); + p->rErr = (double)iSm; + }else{ + p->rSum = (double)iVal; + p->rErr = 0.0; + } +} + /* ** Routines used to compute the sum, average, and total. 
** @@ -129175,15 +135199,29 @@ static void sumStep(sqlite3_context *context, int argc, sqlite3_value **argv){ type = sqlite3_value_numeric_type(argv[0]); if( p && type!=SQLITE_NULL ){ p->cnt++; - if( type==SQLITE_INTEGER ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum += v; - if( (p->approx|p->overflow)==0 && sqlite3AddInt64(&p->iSum, v) ){ - p->approx = p->overflow = 1; + if( p->approx==0 ){ + if( type!=SQLITE_INTEGER ){ + kahanBabuskaNeumaierInit(p, p->iSum); + p->approx = 1; + kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0])); + }else{ + i64 x = p->iSum; + if( sqlite3AddInt64(&x, sqlite3_value_int64(argv[0]))==0 ){ + p->iSum = x; + }else{ + p->ovrfl = 1; + kahanBabuskaNeumaierInit(p, p->iSum); + p->approx = 1; + kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0])); + } } }else{ - p->rSum += sqlite3_value_double(argv[0]); - p->approx = 1; + if( type==SQLITE_INTEGER ){ + kahanBabuskaNeumaierStepInt64(p, sqlite3_value_int64(argv[0])); + }else{ + p->ovrfl = 0; + kahanBabuskaNeumaierStep(p, sqlite3_value_double(argv[0])); + } } } } @@ -129200,13 +135238,18 @@ static void sumInverse(sqlite3_context *context, int argc, sqlite3_value**argv){ if( ALWAYS(p) && type!=SQLITE_NULL ){ assert( p->cnt>0 ); p->cnt--; - assert( type==SQLITE_INTEGER || p->approx ); - if( type==SQLITE_INTEGER && p->approx==0 ){ - i64 v = sqlite3_value_int64(argv[0]); - p->rSum -= v; - p->iSum -= v; + if( !p->approx ){ + p->iSum -= sqlite3_value_int64(argv[0]); + }else if( type==SQLITE_INTEGER ){ + i64 iVal = sqlite3_value_int64(argv[0]); + if( iVal!=SMALLEST_INT64 ){ + kahanBabuskaNeumaierStepInt64(p, -iVal); + }else{ + kahanBabuskaNeumaierStepInt64(p, LARGEST_INT64); + kahanBabuskaNeumaierStepInt64(p, 1); + } }else{ - p->rSum -= sqlite3_value_double(argv[0]); + kahanBabuskaNeumaierStep(p, -sqlite3_value_double(argv[0])); } } } @@ -129217,10 +135260,14 @@ static void sumFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ - if( p->overflow ){ - sqlite3_result_error(context,"integer overflow",-1); - }else if( p->approx ){ - sqlite3_result_double(context, p->rSum); + if( p->approx ){ + if( p->ovrfl ){ + sqlite3_result_error(context,"integer overflow",-1); + }else if( !sqlite3IsOverflow(p->rErr) ){ + sqlite3_result_double(context, p->rSum+p->rErr); + }else{ + sqlite3_result_double(context, p->rSum); + } }else{ sqlite3_result_int64(context, p->iSum); } @@ -129230,14 +135277,29 @@ static void avgFinalize(sqlite3_context *context){ SumCtx *p; p = sqlite3_aggregate_context(context, 0); if( p && p->cnt>0 ){ - sqlite3_result_double(context, p->rSum/(double)p->cnt); + double r; + if( p->approx ){ + r = p->rSum; + if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr; + }else{ + r = (double)(p->iSum); + } + sqlite3_result_double(context, r/(double)p->cnt); } } static void totalFinalize(sqlite3_context *context){ SumCtx *p; + double r = 0.0; p = sqlite3_aggregate_context(context, 0); - /* (double)0 In case of SQLITE_OMIT_FLOATING_POINT... */ - sqlite3_result_double(context, p ? p->rSum : (double)0); + if( p ){ + if( p->approx ){ + r = p->rSum; + if( !sqlite3IsOverflow(p->rErr) ) r += p->rErr; + }else{ + r = (double)(p->iSum); + } + } + sqlite3_result_double(context, r); } /* @@ -129356,6 +135418,7 @@ static void minMaxFinalize(sqlite3_context *context){ /* ** group_concat(EXPR, ?SEPARATOR?) +** string_agg(EXPR, SEPARATOR) ** ** The SEPARATOR goes before the EXPR string. This is tragic. 
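/*
** [Illustrative sketch, not part of the upstream diff] Why sumInverse()
** above special-cases SMALLEST_INT64: negating the most negative 64-bit
** integer is undefined behavior, so the value is removed as
** INT64_MAX plus 1 instead, which equals -INT64_MIN exactly.
*/
#include <stdio.h>
#include <stdint.h>

int main(void){
  int64_t smallest = INT64_MIN;
  /* -smallest would overflow; split the removal into two safe parts.
  ** (double)INT64_MAX rounds up to 2^63, and adding 1.0 leaves it at
  ** 2^63, which is exactly -INT64_MIN. */
  double removed = (double)INT64_MAX + 1.0;
  printf("INT64_MIN         = %lld\n", (long long)smallest);
  printf("removed as double = %.1f\n", removed);
  return 0;
}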
The ** groupConcatInverse() implementation would have been easier if the @@ -129459,7 +135522,7 @@ static void groupConcatInverse( if( sqlite3_value_type(argv[0])==SQLITE_NULL ) return; pGCC = (GroupConcatCtx*)sqlite3_aggregate_context(context, sizeof(*pGCC)); /* pGCC is always non-NULL since groupConcatStep() will have always - ** run frist to initialize it */ + ** run first to initialize it */ if( ALWAYS(pGCC) ){ int nVS; /* Must call sqlite3_value_text() to convert the argument into text prior @@ -129554,8 +135617,10 @@ SQLITE_PRIVATE void sqlite3RegisterPerConnectionBuiltinFunctions(sqlite3 *db){ ** sensitive. */ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive){ + FuncDef *pDef; struct compareInfo *pInfo; int flags; + int nArg; if( caseSensitive ){ pInfo = (struct compareInfo*)&likeInfoAlt; flags = SQLITE_FUNC_LIKE | SQLITE_FUNC_CASE; @@ -129563,10 +135628,13 @@ SQLITE_PRIVATE void sqlite3RegisterLikeFunctions(sqlite3 *db, int caseSensitive) pInfo = (struct compareInfo*)&likeInfoNorm; flags = SQLITE_FUNC_LIKE; } - sqlite3CreateFunc(db, "like", 2, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0); - sqlite3CreateFunc(db, "like", 3, SQLITE_UTF8, pInfo, likeFunc, 0, 0, 0, 0, 0); - sqlite3FindFunction(db, "like", 2, SQLITE_UTF8, 0)->funcFlags |= flags; - sqlite3FindFunction(db, "like", 3, SQLITE_UTF8, 0)->funcFlags |= flags; + for(nArg=2; nArg<=3; nArg++){ + sqlite3CreateFunc(db, "like", nArg, SQLITE_UTF8, pInfo, likeFunc, + 0, 0, 0, 0, 0); + pDef = sqlite3FindFunction(db, "like", nArg, SQLITE_UTF8, 0); + pDef->funcFlags |= flags; + pDef->funcFlags &= ~SQLITE_FUNC_UNSAFE; + } } /* @@ -129687,6 +135755,18 @@ static void ceilingFunc( static double xCeil(double x){ return ceil(x); } static double xFloor(double x){ return floor(x); } +/* +** Some systems do not have log2() and log10() in their standard math +** libraries. +*/ +#if defined(HAVE_LOG10) && HAVE_LOG10==0 +# define log10(X) (0.4342944819032517867*log(X)) +#endif +#if defined(HAVE_LOG2) && HAVE_LOG2==0 +# define log2(X) (1.442695040888963456*log(X)) +#endif + + /* ** Implementation of SQL functions: ** @@ -129725,17 +135805,15 @@ static void logFunc( } ans = log(x)/b; }else{ - ans = log(x); switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){ case 1: - /* Convert from natural logarithm to log base 10 */ - ans /= M_LN10; + ans = log10(x); break; case 2: - /* Convert from natural logarithm to log base 2 */ - ans /= M_LN2; + ans = log2(x); break; default: + ans = log(x); break; } } @@ -129804,6 +135882,7 @@ static void piFunc( sqlite3_value **argv ){ assert( argc==0 ); + (void)argv; sqlite3_result_double(context, M_PI); } @@ -129827,6 +135906,37 @@ static void signFunc( sqlite3_result_int(context, x<0.0 ? -1 : x>0.0 ? +1 : 0); } +#ifdef SQLITE_DEBUG +/* +** Implementation of fpdecode(x,y,z) function. +** +** x is a real number that is to be decoded. y is the precision. +** z is the maximum real precision. 
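/*
** [Illustrative sketch, not part of the upstream diff] The HAVE_LOG10 /
** HAVE_LOG2 fallback macros above use the change-of-base identities
** log10(x) = log(x)/ln(10) and log2(x) = log(x)/ln(2);
** 0.4342944819032517867 is 1/ln(10) and 1.442695040888963456 is 1/ln(2).
*/
#include <stdio.h>
#include <math.h>

#define demo_log10(X) (0.4342944819032517867*log(X))
#define demo_log2(X)  (1.442695040888963456*log(X))

int main(void){
  printf("demo_log10(1000) = %.15f\n", demo_log10(1000.0));  /* ~3.0 */
  printf("demo_log2(8)     = %.15f\n", demo_log2(8.0));      /* ~3.0 */
  return 0;
}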
+*/ +static void fpdecodeFunc( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + FpDecode s; + double x; + int y, z; + char zBuf[100]; + UNUSED_PARAMETER(argc); + assert( argc==3 ); + x = sqlite3_value_double(argv[0]); + y = sqlite3_value_int(argv[1]); + z = sqlite3_value_int(argv[2]); + sqlite3FpDecode(&s, x, y, z); + if( s.isSpecial==2 ){ + sqlite3_snprintf(sizeof(zBuf), zBuf, "NaN"); + }else{ + sqlite3_snprintf(sizeof(zBuf), zBuf, "%c%.*s/%d", s.sign, s.n, s.z, s.iDP); + } + sqlite3_result_text(context, zBuf, -1, SQLITE_TRANSIENT); +} +#endif /* SQLITE_DEBUG */ + /* ** All of the FuncDef structures in the aBuiltinFunc[] array above ** to the global function hash table. This occurs at start-time (as @@ -129891,12 +136001,16 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ FUNCTION2(typeof, 1, 0, 0, typeofFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(subtype, 1, 0, 0, subtypeFunc, SQLITE_FUNC_TYPEOF), FUNCTION2(length, 1, 0, 0, lengthFunc, SQLITE_FUNC_LENGTH), + FUNCTION2(octet_length, 1, 0, 0, bytelengthFunc,SQLITE_FUNC_BYTELEN), FUNCTION(instr, 2, 0, 0, instrFunc ), FUNCTION(printf, -1, 0, 0, printfFunc ), FUNCTION(format, -1, 0, 0, printfFunc ), FUNCTION(unicode, 1, 0, 0, unicodeFunc ), FUNCTION(char, -1, 0, 0, charFunc ), FUNCTION(abs, 1, 0, 0, absFunc ), +#ifdef SQLITE_DEBUG + FUNCTION(fpdecode, 3, 0, 0, fpdecodeFunc ), +#endif #ifndef SQLITE_OMIT_FLOATING_POINT FUNCTION(round, 1, 0, 0, roundFunc ), FUNCTION(round, 2, 0, 0, roundFunc ), @@ -129904,6 +136018,13 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ FUNCTION(upper, 1, 0, 0, upperFunc ), FUNCTION(lower, 1, 0, 0, lowerFunc ), FUNCTION(hex, 1, 0, 0, hexFunc ), + FUNCTION(unhex, 1, 0, 0, unhexFunc ), + FUNCTION(unhex, 2, 0, 0, unhexFunc ), + FUNCTION(concat, -1, 0, 0, concatFunc ), + FUNCTION(concat, 0, 0, 0, 0 ), + FUNCTION(concat_ws, -1, 0, 0, concatwsFunc ), + FUNCTION(concat_ws, 0, 0, 0, 0 ), + FUNCTION(concat_ws, 1, 0, 0, 0 ), INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ), VFUNCTION(random, 0, 0, 0, randomFunc ), VFUNCTION(randomblob, 1, 0, 0, randomBlob ), @@ -129933,6 +136054,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){ groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), WAGGREGATE(group_concat, 2, 0, 0, groupConcatStep, groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), + WAGGREGATE(string_agg, 2, 0, 0, groupConcatStep, + groupConcatFinalize, groupConcatValue, groupConcatInverse, 0), LIKEFUNC(glob, 2, &globInfo, SQLITE_FUNC_LIKE|SQLITE_FUNC_CASE), #ifdef SQLITE_CASE_SENSITIVE_LIKE @@ -130875,6 +136998,7 @@ static int isSetNullAction(Parse *pParse, FKey *pFKey){ if( (p==pFKey->apTrigger[0] && pFKey->aAction[0]==OE_SetNull) || (p==pFKey->apTrigger[1] && pFKey->aAction[1]==OE_SetNull) ){ + assert( (pTop->db->flags & SQLITE_FkNoAction)==0 ); return 1; } } @@ -131069,6 +137193,8 @@ SQLITE_PRIVATE void sqlite3FkCheck( } if( regOld!=0 ){ int eAction = pFKey->aAction[aChange!=0]; + if( (db->flags & SQLITE_FkNoAction) ) eAction = OE_None; + fkScanChildren(pParse, pSrc, pTab, pIdx, pFKey, aiCol, regOld, 1); /* If this is a deferred FK constraint, or a CASCADE or SET NULL ** action applies, then any foreign key violations caused by @@ -131184,7 +137310,11 @@ SQLITE_PRIVATE int sqlite3FkRequired( /* Check if any parent key columns are being modified. 
*/ for(p=sqlite3FkReferences(pTab); p; p=p->pNextTo){ if( fkParentIsModified(pTab, p, aChange, chngRowid) ){ - if( p->aAction[1]!=OE_None ) return 2; + if( (pParse->db->flags & SQLITE_FkNoAction)==0 + && p->aAction[1]!=OE_None + ){ + return 2; + } bHaveFK = 1; } } @@ -131234,6 +137364,7 @@ static Trigger *fkActionTrigger( int iAction = (pChanges!=0); /* 1 for UPDATE, 0 for DELETE */ action = pFKey->aAction[iAction]; + if( (db->flags & SQLITE_FkNoAction) ) action = OE_None; if( action==OE_Restrict && (db->flags & SQLITE_DeferFKs) ){ return 0; } @@ -131334,22 +137465,22 @@ static Trigger *fkActionTrigger( if( action==OE_Restrict ){ int iDb = sqlite3SchemaToIndex(db, pTab->pSchema); - Token tFrom; - Token tDb; + SrcList *pSrc; Expr *pRaise; - tFrom.z = zFrom; - tFrom.n = nFrom; - tDb.z = db->aDb[iDb].zDbSName; - tDb.n = sqlite3Strlen30(tDb.z); - pRaise = sqlite3Expr(db, TK_RAISE, "FOREIGN KEY constraint failed"); if( pRaise ){ pRaise->affExpr = OE_Abort; } + pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); + if( pSrc ){ + assert( pSrc->nSrc==1 ); + pSrc->a[0].zName = sqlite3DbStrDup(db, zFrom); + pSrc->a[0].zDatabase = sqlite3DbStrDup(db, db->aDb[iDb].zDbSName); + } pSelect = sqlite3SelectNew(pParse, sqlite3ExprListAppend(pParse, 0, pRaise), - sqlite3SrcListAppend(pParse, 0, &tDb, &tFrom), + pSrc, pWhere, 0, 0, 0, 0, 0 ); @@ -131456,17 +137587,17 @@ SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *db, Table *pTab){ FKey *pNext; /* Copy of pFKey->pNextFrom */ assert( IsOrdinaryTable(pTab) ); + assert( db!=0 ); for(pFKey=pTab->u.tab.pFKey; pFKey; pFKey=pNext){ assert( db==0 || sqlite3SchemaMutexHeld(db, 0, pTab->pSchema) ); /* Remove the FK from the fkeyHash hash table. */ - if( !db || db->pnBytesFreed==0 ){ + if( db->pnBytesFreed==0 ){ if( pFKey->pPrevTo ){ pFKey->pPrevTo->pNextTo = pFKey->pNextTo; }else{ - void *p = (void *)pFKey->pNextTo; - const char *z = (p ? pFKey->pNextTo->zTo : pFKey->zTo); - sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, p); + const char *z = (pFKey->pNextTo ? pFKey->pNextTo->zTo : pFKey->zTo); + sqlite3HashInsert(&pTab->pSchema->fkeyHash, z, pFKey->pNextTo); } if( pFKey->pNextTo ){ pFKey->pNextTo->pPrevTo = pFKey->pPrevTo; @@ -131529,8 +137660,10 @@ SQLITE_PRIVATE void sqlite3OpenTable( assert( pParse->pVdbe!=0 ); v = pParse->pVdbe; assert( opcode==OP_OpenWrite || opcode==OP_OpenRead ); - sqlite3TableLock(pParse, iDb, pTab->tnum, - (opcode==OP_OpenWrite)?1:0, pTab->zName); + if( !pParse->db->noSharedCache ){ + sqlite3TableLock(pParse, iDb, pTab->tnum, + (opcode==OP_OpenWrite)?1:0, pTab->zName); + } if( HasRowid(pTab) ){ sqlite3VdbeAddOp4Int(v, opcode, iCur, pTab->tnum, iDb, pTab->nNVCol); VdbeComment((v, "%s", pTab->zName)); @@ -131564,43 +137697,68 @@ SQLITE_PRIVATE void sqlite3OpenTable( ** is managed along with the rest of the Index structure. It will be ** released when sqlite3DeleteIndex() is called. */ -SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){ +static SQLITE_NOINLINE const char *computeIndexAffStr(sqlite3 *db, Index *pIdx){ + /* The first time a column affinity string for a particular index is + ** required, it is allocated and populated here. It is then stored as + ** a member of the Index structure for subsequent use. + ** + ** The column affinity string will eventually be deleted by + ** sqliteDeleteIndex() when the Index structure itself is cleaned + ** up. 
+  */
+  int n;
+  Table *pTab = pIdx->pTable;
+  pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1);
   if( !pIdx->zColAff ){
-    /* The first time a column affinity string for a particular index is
-    ** required, it is allocated and populated here. It is then stored as
-    ** a member of the Index structure for subsequent use.
-    **
-    ** The column affinity string will eventually be deleted by
-    ** sqliteDeleteIndex() when the Index structure itself is cleaned
-    ** up.
-    */
-    int n;
-    Table *pTab = pIdx->pTable;
-    pIdx->zColAff = (char *)sqlite3DbMallocRaw(0, pIdx->nColumn+1);
-    if( !pIdx->zColAff ){
-      sqlite3OomFault(db);
-      return 0;
+    sqlite3OomFault(db);
+    return 0;
+  }
+  for(n=0; n<pIdx->nColumn; n++){
+    i16 x = pIdx->aiColumn[n];
+    char aff;
+    if( x>=0 ){
+      aff = pTab->aCol[x].affinity;
+    }else if( x==XN_ROWID ){
+      aff = SQLITE_AFF_INTEGER;
+    }else{
+      assert( x==XN_EXPR );
+      assert( pIdx->bHasExpr );
+      assert( pIdx->aColExpr!=0 );
+      aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
     }
-    for(n=0; n<pIdx->nColumn; n++){
-      i16 x = pIdx->aiColumn[n];
-      char aff;
-      if( x>=0 ){
-        aff = pTab->aCol[x].affinity;
-      }else if( x==XN_ROWID ){
-        aff = SQLITE_AFF_INTEGER;
-      }else{
-        assert( x==XN_EXPR );
-        assert( pIdx->aColExpr!=0 );
-        aff = sqlite3ExprAffinity(pIdx->aColExpr->a[n].pExpr);
+    if( aff<SQLITE_AFF_BLOB ) aff = SQLITE_AFF_BLOB;
+    if( aff>SQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC;
+    pIdx->zColAff[n] = aff;
+  }
+  pIdx->zColAff[n] = 0;
+  return pIdx->zColAff;
+}
+SQLITE_PRIVATE const char *sqlite3IndexAffinityStr(sqlite3 *db, Index *pIdx){
+  if( !pIdx->zColAff ) return computeIndexAffStr(db, pIdx);
+  return pIdx->zColAff;
+}
+
+
+/*
+** Compute an affinity string for a table.   Space is obtained
+** from sqlite3DbMalloc().  The caller is responsible for freeing
+** the space when done.
+*/
+SQLITE_PRIVATE char *sqlite3TableAffinityStr(sqlite3 *db, const Table *pTab){
+  char *zColAff;
+  zColAff = (char *)sqlite3DbMallocRaw(db, pTab->nCol+1);
+  if( zColAff ){
+    int i, j;
+    for(i=j=0; i<pTab->nCol; i++){
+      if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){
+        zColAff[j++] = pTab->aCol[i].affinity;
      }
-      if( aff<SQLITE_AFF_BLOB ) aff = SQLITE_AFF_BLOB;
-      if( aff>SQLITE_AFF_NUMERIC) aff = SQLITE_AFF_NUMERIC;
-      pIdx->zColAff[n] = aff;
    }
-    pIdx->zColAff[n] = 0;
+    do{
+      zColAff[j--] = 0;
+    }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB );
  }
-
-  return pIdx->zColAff;
+  return zColAff;
}

/*
@@ -131634,7 +137792,7 @@
** For STRICT tables:
** ------------------
**
-** Generate an appropropriate OP_TypeCheck opcode that will verify the
+** Generate an appropriate OP_TypeCheck opcode that will verify the
** datatypes against the column definitions in pTab.  If iReg==0, that
** means an OP_MakeRecord opcode has already been generated and should be
** the last opcode generated.  The new OP_TypeCheck needs to be inserted
@@ -131644,7 +137802,7 @@
** Apply the type checking to that array of registers.
*/
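/*
** [Illustrative sketch, not part of the upstream diff] The do/while at
** the end of sqlite3TableAffinityStr() above trims trailing no-op
** affinities.  Standalone model, using the convention that affinity
** codes are single bytes with 'A' (BLOB) as the smallest "real" value,
** so any trailing byte <= 'A' can be dropped:
*/
#include <stdio.h>

static void demoTrim(char *zColAff, int nCol){
  int j = nCol;
  do{
    zColAff[j--] = 0;               /* terminate, then strip no-op tail */
  }while( j>=0 && zColAff[j]<='A' );
}

int main(void){
  char aff[] = "DBCAA";   /* last two columns have no-op (BLOB) affinity */
  demoTrim(aff, 5);
  printf("[%s]\n", aff);  /* prints [DBC] */
  return 0;
}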
SQLITE_PRIVATE void sqlite3TableAffinity(Vdbe *v, Table *pTab, int iReg){
- int i, j;
+ int i;
  char *zColAff;
  if( pTab->tabFlags & TF_Strict ){
    if( iReg==0 ){
      /* Move the previous opcode (which should be OP_MakeRecord) forward
      ** by one slot and insert a new OP_TypeCheck where the current
      ** OP_MakeRecord is found */
      VdbeOp *pPrev;
      sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
-     pPrev = sqlite3VdbeGetOp(v, -1);
+     pPrev = sqlite3VdbeGetLastOp(v);
      assert( pPrev!=0 );
      assert( pPrev->opcode==OP_MakeRecord || sqlite3VdbeDb(v)->mallocFailed );
      pPrev->opcode = OP_TypeCheck;
@@ -131667,22 +137825,11 @@
  }
  zColAff = pTab->zColAff;
  if( zColAff==0 ){
-   sqlite3 *db = sqlite3VdbeDb(v);
-   zColAff = (char *)sqlite3DbMallocRaw(0, pTab->nCol+1);
+   zColAff = sqlite3TableAffinityStr(0, pTab);
    if( !zColAff ){
-     sqlite3OomFault(db);
+     sqlite3OomFault(sqlite3VdbeDb(v));
      return;
    }
-
-   for(i=j=0; i<pTab->nCol; i++){
-     assert( pTab->aCol[i].affinity!=0 || sqlite3VdbeParser(v)->nErr>0 );
-     if( (pTab->aCol[i].colFlags & COLFLAG_VIRTUAL)==0 ){
-       zColAff[j++] = pTab->aCol[i].affinity;
-     }
-   }
-   do{
-     zColAff[j--] = 0;
-   }while( j>=0 && zColAff[j]<=SQLITE_AFF_BLOB );
    pTab->zColAff = zColAff;
  }
  assert( zColAff!=0 );
@@ -131691,7 +137838,7 @@
  if( iReg ){
    sqlite3VdbeAddOp4(v, OP_Affinity, iReg, i, 0, zColAff, i);
  }else{
-   assert( sqlite3VdbeGetOp(v, -1)->opcode==OP_MakeRecord
+   assert( sqlite3VdbeGetLastOp(v)->opcode==OP_MakeRecord
        || sqlite3VdbeDb(v)->mallocFailed );
    sqlite3VdbeChangeP4(v, -1, zColAff, i);
  }
@@ -131777,7 +137924,7 @@ SQLITE_PRIVATE void sqlite3ComputeGeneratedColumns(
  */
  sqlite3TableAffinity(pParse->pVdbe, pTab, iRegStore);
  if( (pTab->tabFlags & TF_HasStored)!=0 ){
-   pOp = sqlite3VdbeGetOp(pParse->pVdbe,-1);
+   pOp = sqlite3VdbeGetLastOp(pParse->pVdbe);
    if( pOp->opcode==OP_Affinity ){
      /* Change the OP_Affinity argument to '@' (NONE) for all stored
      ** columns. '@' is the no-op affinity and those columns have not
@@ -132276,7 +138423,7 @@ SQLITE_PRIVATE void sqlite3Insert(
  /* Cannot insert into a read-only table.
  */
- if( sqlite3IsReadOnly(pParse, pTab, tmask) ){
+ if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){
    goto insert_cleanup;
  }
@@ -132564,7 +138711,7 @@
      pNx->iDataCur = iDataCur;
      pNx->iIdxCur = iIdxCur;
      if( pNx->pUpsertTarget ){
-       if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx) ){
+       if( sqlite3UpsertAnalyzeTarget(pParse, pTabList, pNx, pUpsert) ){
          goto insert_cleanup;
        }
      }
@@ -132683,7 +138830,12 @@
          sqlite3VdbeAddOp2(v, OP_SCopy, regFromSelect+k, iRegStore);
        }
      }else{
-       sqlite3ExprCode(pParse, pList->a[k].pExpr, iRegStore);
+       Expr *pX = pList->a[k].pExpr;
+       int y = sqlite3ExprCodeTarget(pParse, pX, iRegStore);
+       if( y!=iRegStore ){
+         sqlite3VdbeAddOp2(v,
+           ExprHasProperty(pX, EP_Subquery) ? OP_Copy : OP_SCopy, y, iRegStore);
+       }
      }
    }
@@ -132718,7 +138870,7 @@
  }

  /* Copy the new data already generated.
*/ - assert( pTab->nNVCol>0 ); + assert( pTab->nNVCol>0 || pParse->nErr>0 ); sqlite3VdbeAddOp3(v, OP_Copy, regRowid+1, regCols+1, pTab->nNVCol-1); #ifndef SQLITE_OMIT_GENERATED_COLUMNS @@ -132820,7 +138972,9 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur, regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0, pUpsert ); - sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0); + if( db->flags & SQLITE_ForeignKeys ){ + sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0); + } /* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE ** constraints or (b) there are no triggers and this table is not a @@ -132904,7 +139058,7 @@ SQLITE_PRIVATE void sqlite3Insert( sqlite3UpsertDelete(db, pUpsert); sqlite3SelectDelete(db, pSelect); sqlite3IdListDelete(db, pColumn); - sqlite3DbFree(db, aRegIdx); + if( aRegIdx ) sqlite3DbNNFreeNN(db, aRegIdx); } /* Make sure "isView" and other macros defined above are undefined. Otherwise @@ -132930,7 +139084,7 @@ SQLITE_PRIVATE void sqlite3Insert( /* This is the Walker callback from sqlite3ExprReferencesUpdatedColumn(). * Set bit 0x01 of pWalker->eCode if pWalker->eCode to 0 and if this ** expression node references any of the -** columns that are being modifed by an UPDATE statement. +** columns that are being modified by an UPDATE statement. */ static int checkConstraintExprNode(Walker *pWalker, Expr *pExpr){ if( pExpr->op==TK_COLUMN ){ @@ -133153,7 +139307,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( int *aiChng, /* column i is unchanged if aiChng[i]<0 */ Upsert *pUpsert /* ON CONFLICT clauses, if any. NULL otherwise */ ){ - Vdbe *v; /* VDBE under constrution */ + Vdbe *v; /* VDBE under construction */ Index *pIdx; /* Pointer to one of the indices */ Index *pPk = 0; /* The PRIMARY KEY index for WITHOUT ROWID tables */ sqlite3 *db; /* Database connection */ @@ -133268,6 +139422,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( case OE_Fail: { char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName, pCol->zCnName); + testcase( zMsg==0 && db->mallocFailed==0 ); sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError, iReg); sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC); @@ -133635,7 +139790,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks( pIdx; pIdx = indexIteratorNext(&sIdxIter, &ix) ){ - int regIdx; /* Range of registers hold conent for pIdx */ + int regIdx; /* Range of registers holding content for pIdx */ int regR; /* Range of registers holding conflicting PK */ int iThisCur; /* Cursor for this UNIQUE index */ int addrUniqueOk; /* Jump here if the UNIQUE constraint is satisfied */ @@ -134130,6 +140285,8 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices( assert( op==OP_OpenRead || op==OP_OpenWrite ); assert( op==OP_OpenWrite || p5==0 ); + assert( piDataCur!=0 ); + assert( piIdxCur!=0 ); if( IsVirtual(pTab) ){ /* This routine is a no-op for virtual tables. 
Leave the output ** variables *piDataCur and *piIdxCur set to illegal cursor numbers @@ -134142,18 +140299,18 @@ SQLITE_PRIVATE int sqlite3OpenTableAndIndices( assert( v!=0 ); if( iBase<0 ) iBase = pParse->nTab; iDataCur = iBase++; - if( piDataCur ) *piDataCur = iDataCur; + *piDataCur = iDataCur; if( HasRowid(pTab) && (aToOpen==0 || aToOpen[0]) ){ sqlite3OpenTable(pParse, iDataCur, iDb, pTab, op); - }else{ + }else if( pParse->db->noSharedCache==0 ){ sqlite3TableLock(pParse, iDb, pTab->tnum, op==OP_OpenWrite, pTab->zName); } - if( piIdxCur ) *piIdxCur = iBase; + *piIdxCur = iBase; for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){ int iIdxCur = iBase++; assert( pIdx->pSchema==pTab->pSchema ); if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){ - if( piDataCur ) *piDataCur = iIdxCur; + *piDataCur = iIdxCur; p5 = 0; } if( aToOpen==0 || aToOpen[i+1] ){ @@ -134446,12 +140603,15 @@ static int xferOptimization( } } #ifndef SQLITE_OMIT_CHECK - if( pDest->pCheck && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) ){ + if( pDest->pCheck + && (db->mDbFlags & DBFLAG_Vacuum)==0 + && sqlite3ExprListCompare(pSrc->pCheck,pDest->pCheck,-1) + ){ return 0; /* Tables have different CHECK constraints. Ticket #2252 */ } #endif #ifndef SQLITE_OMIT_FOREIGN_KEY - /* Disallow the transfer optimization if the destination table constains + /* Disallow the transfer optimization if the destination table contains ** any foreign key constraints. This is more restrictive than necessary. ** But the main beneficiary of the transfer optimization is the VACUUM ** command, and the VACUUM command disables foreign key constraints. So @@ -135131,9 +141291,9 @@ struct sqlite3_api_routines { const char *(*filename_journal)(const char*); const char *(*filename_wal)(const char*); /* Version 3.32.0 and later */ - char *(*create_filename)(const char*,const char*,const char*, + const char *(*create_filename)(const char*,const char*,const char*, int,const char**); - void (*free_filename)(char*); + void (*free_filename)(const char*); sqlite3_file *(*database_file_object)(const char*); /* Version 3.34.0 and later */ int (*txn_state)(sqlite3*,const char*); @@ -135157,6 +141317,15 @@ struct sqlite3_api_routines { unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*, unsigned int); const char *(*db_name)(sqlite3*,int); + /* Version 3.40.0 and later */ + int (*value_encoding)(sqlite3_value*); + /* Version 3.41.0 and later */ + int (*is_interrupted)(sqlite3*); + /* Version 3.43.0 and later */ + int (*stmt_explain)(sqlite3_stmt*,int); + /* Version 3.44.0 and later */ + void *(*get_clientdata)(sqlite3*,const char*); + int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*)); }; /* @@ -135481,6 +141650,15 @@ typedef int (*sqlite3_loadext_entry)( #define sqlite3_serialize sqlite3_api->serialize #endif #define sqlite3_db_name sqlite3_api->db_name +/* Version 3.40.0 and later */ +#define sqlite3_value_encoding sqlite3_api->value_encoding +/* Version 3.41.0 and later */ +#define sqlite3_is_interrupted sqlite3_api->is_interrupted +/* Version 3.43.0 and later */ +#define sqlite3_stmt_explain sqlite3_api->stmt_explain +/* Version 3.44.0 and later */ +#define sqlite3_get_clientdata sqlite3_api->get_clientdata +#define sqlite3_set_clientdata sqlite3_api->set_clientdata #endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */ #if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) @@ -135993,7 +142171,16 @@ static const sqlite3_api_routines sqlite3Apis = { 0, 0, #endif - sqlite3_db_name + 
sqlite3_db_name, + /* Version 3.40.0 and later */ + sqlite3_value_encoding, + /* Version 3.41.0 and later */ + sqlite3_is_interrupted, + /* Version 3.43.0 and later */ + sqlite3_stmt_explain, + /* Version 3.44.0 and later */ + sqlite3_get_clientdata, + sqlite3_set_clientdata }; /* True if x is the directory separator character @@ -136066,15 +142253,25 @@ static int sqlite3LoadExtension( /* tag-20210611-1. Some dlopen() implementations will segfault if given ** an oversize filename. Most filesystems have a pathname limit of 4K, ** so limit the extension filename length to about twice that. - ** https://sqlite.org/forum/forumpost/08a0d6d9bf */ + ** https://sqlite.org/forum/forumpost/08a0d6d9bf + ** + ** Later (2023-03-25): Save an extra 6 bytes for the filename suffix. + ** See https://sqlite.org/forum/forumpost/24083b579d. + */ if( nMsg>SQLITE_MAX_PATHLEN ) goto extension_not_found; + /* Do not allow sqlite3_load_extension() to link to a copy of the + ** running application, by passing in an empty filename. */ + if( nMsg==0 ) goto extension_not_found; + handle = sqlite3OsDlOpen(pVfs, zFile); #if SQLITE_OS_UNIX || SQLITE_OS_WIN for(ii=0; iimutex); if( onoff ){ db->flags |= SQLITE_LoadExtension|SQLITE_LoadExtFunc; @@ -136248,6 +142448,9 @@ SQLITE_API int sqlite3_auto_extension( void (*xInit)(void) ){ int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( xInit==0 ) return SQLITE_MISUSE_BKPT; +#endif #ifndef SQLITE_OMIT_AUTOINIT rc = sqlite3_initialize(); if( rc ){ @@ -136300,6 +142503,9 @@ SQLITE_API int sqlite3_cancel_auto_extension( int i; int n = 0; wsdAutoextInit; +#ifdef SQLITE_ENABLE_API_ARMOR + if( xInit==0 ) return 0; +#endif sqlite3_mutex_enter(mutex); for(i=(int)wsdAutoext.nExt-1; i>=0; i--){ if( wsdAutoext.aExt[i]==xInit ){ @@ -137963,7 +144169,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** ** The first form reports the current local setting for the ** page cache spill size. The second form turns cache spill on - ** or off. When turnning cache spill on, the size is set to the + ** or off. When turning cache spill on, the size is set to the ** current cache_size. The third form sets a spill size that ** may be different form the cache size. ** If N is positive then that is the @@ -138233,7 +144439,11 @@ SQLITE_PRIVATE void sqlite3Pragma( #endif if( sqlite3GetBoolean(zRight, 0) ){ - db->flags |= mask; + if( (mask & SQLITE_WriteSchema)==0 + || (db->flags & SQLITE_Defensive)==0 + ){ + db->flags |= mask; + } }else{ db->flags &= ~mask; if( mask==SQLITE_DeferFKs ) db->nDeferredImmCons = 0; @@ -138633,7 +144843,7 @@ SQLITE_PRIVATE void sqlite3Pragma( zDb = db->aDb[iDb].zDbSName; sqlite3CodeVerifySchema(pParse, iDb); sqlite3TableLock(pParse, iDb, pTab->tnum, 0, pTab->zName); - if( pTab->nCol+regRow>pParse->nMem ) pParse->nMem = pTab->nCol + regRow; + sqlite3TouchRegister(pParse, pTab->nCol+regRow); sqlite3OpenTable(pParse, 0, iDb, pTab, OP_OpenRead); sqlite3VdbeLoadString(v, regResult, pTab->zName); assert( IsOrdinaryTable(pTab) ); @@ -138674,7 +144884,7 @@ SQLITE_PRIVATE void sqlite3Pragma( ** regRow..regRow+n. If any of the child key values are NULL, this ** row cannot cause an FK violation. Jump directly to addrOk in ** this case. */ - if( regRow+pFK->nCol>pParse->nMem ) pParse->nMem = regRow+pFK->nCol; + sqlite3TouchRegister(pParse, regRow + pFK->nCol); for(j=0; jnCol; j++){ int iCol = aiCols ? 
aiCols[j] : pFK->aCol[j].iFrom; sqlite3ExprCodeGetColumnOfTable(v, pTab, 0, iCol, regRow+j); @@ -138741,9 +144951,9 @@ SQLITE_PRIVATE void sqlite3Pragma( ** The "quick_check" is reduced version of ** integrity_check designed to detect most database corruption ** without the overhead of cross-checking indexes. Quick_check - ** is linear time wherease integrity_check is O(NlogN). + ** is linear time whereas integrity_check is O(NlogN). ** - ** The maximum nubmer of errors is 100 by default. A different default + ** The maximum number of errors is 100 by default. A different default ** can be specified using a numeric parameter N. ** ** Or, the parameter N can be the name of a table. In that case, only @@ -138803,6 +145013,7 @@ SQLITE_PRIVATE void sqlite3Pragma( if( iDb>=0 && i!=iDb ) continue; sqlite3CodeVerifySchema(pParse, i); + pParse->okConstFactor = 0; /* tag-20230327-1 */ /* Do an integrity check of the B-Tree ** @@ -138838,7 +145049,7 @@ SQLITE_PRIVATE void sqlite3Pragma( aRoot[0] = cnt; /* Make sure sufficient number of registers have been allocated */ - pParse->nMem = MAX( pParse->nMem, 8+mxIdx ); + sqlite3TouchRegister(pParse, 8+mxIdx); sqlite3ClearTempRegCache(pParse); /* Do the b-tree integrity checks */ @@ -138857,15 +145068,24 @@ SQLITE_PRIVATE void sqlite3Pragma( for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ Table *pTab = sqliteHashData(x); Index *pIdx, *pPk; - Index *pPrior = 0; + Index *pPrior = 0; /* Previous index */ int loopTop; int iDataCur, iIdxCur; int r1 = -1; - int bStrict; + int bStrict; /* True for a STRICT table */ + int r2; /* Previous key for WITHOUT ROWID tables */ + int mxCol; /* Maximum non-virtual column number */ - if( !IsOrdinaryTable(pTab) ) continue; if( pObjTab && pObjTab!=pTab ) continue; - pPk = HasRowid(pTab) ? 0 : sqlite3PrimaryKeyIndex(pTab); + if( !IsOrdinaryTable(pTab) ) continue; + if( isQuick || HasRowid(pTab) ){ + pPk = 0; + r2 = 0; + }else{ + pPk = sqlite3PrimaryKeyIndex(pTab); + r2 = sqlite3GetTempRange(pParse, pPk->nKeyCol); + sqlite3VdbeAddOp3(v, OP_Null, 1, r2, r2+pPk->nKeyCol-1); + } sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenRead, 0, 1, 0, &iDataCur, &iIdxCur); /* reg[7] counts the number of entries in the table. @@ -138879,52 +145099,181 @@ SQLITE_PRIVATE void sqlite3Pragma( assert( sqlite3NoTempsInRange(pParse,1,7+j) ); sqlite3VdbeAddOp2(v, OP_Rewind, iDataCur, 0); VdbeCoverage(v); loopTop = sqlite3VdbeAddOp2(v, OP_AddImm, 7, 1); + + /* Fetch the right-most column from the table. This will cause + ** the entire record header to be parsed and sanity checked. It + ** will also prepopulate the cursor column cache that is used + ** by the OP_IsType code, so it is a required step. + */ + assert( !IsVirtual(pTab) ); + if( HasRowid(pTab) ){ + mxCol = -1; + for(j=0; jnCol; j++){ + if( (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)==0 ) mxCol++; + } + if( mxCol==pTab->iPKey ) mxCol--; + }else{ + /* COLFLAG_VIRTUAL columns are not included in the WITHOUT ROWID + ** PK index column-count, so there is no need to account for them + ** in this case. 
*/ + mxCol = sqlite3PrimaryKeyIndex(pTab)->nColumn-1; + } + if( mxCol>=0 ){ + sqlite3VdbeAddOp3(v, OP_Column, iDataCur, mxCol, 3); + sqlite3VdbeTypeofColumn(v, 3); + } + if( !isQuick ){ - /* Sanity check on record header decoding */ - sqlite3VdbeAddOp3(v, OP_Column, iDataCur, pTab->nNVCol-1,3); - sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); - VdbeComment((v, "(right-most column)")); + if( pPk ){ + /* Verify WITHOUT ROWID keys are in ascending order */ + int a1; + char *zErr; + a1 = sqlite3VdbeAddOp4Int(v, OP_IdxGT, iDataCur, 0,r2,pPk->nKeyCol); + VdbeCoverage(v); + sqlite3VdbeAddOp1(v, OP_IsNull, r2); VdbeCoverage(v); + zErr = sqlite3MPrintf(db, + "row not in PRIMARY KEY order for %s", + pTab->zName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, a1); + sqlite3VdbeJumpHere(v, a1+1); + for(j=0; jnKeyCol; j++){ + sqlite3ExprCodeLoadIndexColumn(pParse, pPk, iDataCur, j, r2+j); + } + } } - /* Verify that all NOT NULL columns really are NOT NULL. At the - ** same time verify the type of the content of STRICT tables */ + /* Verify datatypes for all columns: + ** + ** (1) NOT NULL columns may not contain a NULL + ** (2) Datatype must be exact for non-ANY columns in STRICT tables + ** (3) Datatype for TEXT columns in non-STRICT tables must be + ** NULL, TEXT, or BLOB. + ** (4) Datatype for numeric columns in non-STRICT tables must not + ** be a TEXT value that can be losslessly converted to numeric. + */ bStrict = (pTab->tabFlags & TF_Strict)!=0; for(j=0; jnCol; j++){ char *zErr; - Column *pCol = pTab->aCol + j; - int doError, jmp2; + Column *pCol = pTab->aCol + j; /* The column to be checked */ + int labelError; /* Jump here to report an error */ + int labelOk; /* Jump here if all looks ok */ + int p1, p3, p4; /* Operands to the OP_IsType opcode */ + int doTypeCheck; /* Check datatypes (besides NOT NULL) */ + if( j==pTab->iPKey ) continue; - if( pCol->notNull==0 && !bStrict ) continue; - doError = bStrict ? 
sqlite3VdbeMakeLabel(pParse) : 0; - sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); - if( sqlite3VdbeGetOp(v,-1)->opcode==OP_Column ){ - sqlite3VdbeChangeP5(v, OPFLAG_TYPEOFARG); + if( bStrict ){ + doTypeCheck = pCol->eCType>COLTYPE_ANY; + }else{ + doTypeCheck = pCol->affinity>SQLITE_AFF_BLOB; + } + if( pCol->notNull==0 && !doTypeCheck ) continue; + + /* Compute the operands that will be needed for OP_IsType */ + p4 = SQLITE_NULL; + if( pCol->colFlags & COLFLAG_VIRTUAL ){ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); + p1 = -1; + p3 = 3; + }else{ + if( pCol->iDflt ){ + sqlite3_value *pDfltValue = 0; + sqlite3ValueFromExpr(db, sqlite3ColumnExpr(pTab,pCol), ENC(db), + pCol->affinity, &pDfltValue); + if( pDfltValue ){ + p4 = sqlite3_value_type(pDfltValue); + sqlite3ValueFree(pDfltValue); + } + } + p1 = iDataCur; + if( !HasRowid(pTab) ){ + testcase( j!=sqlite3TableColumnToStorage(pTab, j) ); + p3 = sqlite3TableColumnToIndex(sqlite3PrimaryKeyIndex(pTab), j); + }else{ + p3 = sqlite3TableColumnToStorage(pTab,j); + testcase( p3!=j); + } } + + labelError = sqlite3VdbeMakeLabel(pParse); + labelOk = sqlite3VdbeMakeLabel(pParse); if( pCol->notNull ){ - jmp2 = sqlite3VdbeAddOp1(v, OP_NotNull, 3); VdbeCoverage(v); + /* (1) NOT NULL columns may not contain a NULL */ + int jmp3; + int jmp2 = sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + VdbeCoverage(v); + if( p1<0 ){ + sqlite3VdbeChangeP5(v, 0x0f); /* INT, REAL, TEXT, or BLOB */ + jmp3 = jmp2; + }else{ + sqlite3VdbeChangeP5(v, 0x0d); /* INT, TEXT, or BLOB */ + /* OP_IsType does not detect NaN values in the database file + ** which should be treated as a NULL. So if the header type + ** is REAL, we have to load the actual data using OP_Column + ** to reliably determine if the value is a NULL. */ + sqlite3VdbeAddOp3(v, OP_Column, p1, p3, 3); + sqlite3ColumnDefault(v, pTab, j, 3); + jmp3 = sqlite3VdbeAddOp2(v, OP_NotNull, 3, labelOk); + VdbeCoverage(v); + } zErr = sqlite3MPrintf(db, "NULL value in %s.%s", pTab->zName, pCol->zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); - if( bStrict && pCol->eCType!=COLTYPE_ANY ){ - sqlite3VdbeGoto(v, doError); + if( doTypeCheck ){ + sqlite3VdbeGoto(v, labelError); + sqlite3VdbeJumpHere(v, jmp2); + sqlite3VdbeJumpHere(v, jmp3); }else{ - integrityCheckResultRow(v); + /* VDBE byte code will fall thru */ } - sqlite3VdbeJumpHere(v, jmp2); } - if( (pTab->tabFlags & TF_Strict)!=0 - && pCol->eCType!=COLTYPE_ANY - ){ - jmp2 = sqlite3VdbeAddOp3(v, OP_IsNullOrType, 3, 0, - sqlite3StdTypeMap[pCol->eCType-1]); + if( bStrict && doTypeCheck ){ + /* (2) Datatype must be exact for non-ANY columns in STRICT tables*/ + static unsigned char aStdTypeMask[] = { + 0x1f, /* ANY */ + 0x18, /* BLOB */ + 0x11, /* INT */ + 0x11, /* INTEGER */ + 0x13, /* REAL */ + 0x14 /* TEXT */ + }; + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + assert( pCol->eCType>=1 && pCol->eCType<=sizeof(aStdTypeMask) ); + sqlite3VdbeChangeP5(v, aStdTypeMask[pCol->eCType-1]); VdbeCoverage(v); zErr = sqlite3MPrintf(db, "non-%s value in %s.%s", sqlite3StdType[pCol->eCType-1], pTab->zName, pTab->aCol[j].zCnName); sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); - sqlite3VdbeResolveLabel(v, doError); - integrityCheckResultRow(v); - sqlite3VdbeJumpHere(v, jmp2); + }else if( !bStrict && pCol->affinity==SQLITE_AFF_TEXT ){ + /* (3) Datatype for TEXT columns in non-STRICT tables must be + ** NULL, TEXT, or BLOB. 
*/ + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */ + VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "NUMERIC value in %s.%s", + pTab->zName, pTab->aCol[j].zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); + }else if( !bStrict && pCol->affinity>=SQLITE_AFF_NUMERIC ){ + /* (4) Datatype for numeric columns in non-STRICT tables must not + ** be a TEXT value that can be converted to numeric. */ + sqlite3VdbeAddOp4Int(v, OP_IsType, p1, labelOk, p3, p4); + sqlite3VdbeChangeP5(v, 0x1b); /* NULL, INT, FLOAT, or BLOB */ + VdbeCoverage(v); + if( p1>=0 ){ + sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, j, 3); + } + sqlite3VdbeAddOp4(v, OP_Affinity, 3, 1, 0, "C", P4_STATIC); + sqlite3VdbeAddOp4Int(v, OP_IsType, -1, labelOk, 3, p4); + sqlite3VdbeChangeP5(v, 0x1c); /* NULL, TEXT, or BLOB */ + VdbeCoverage(v); + zErr = sqlite3MPrintf(db, "TEXT value in %s.%s", + pTab->zName, pTab->aCol[j].zCnName); + sqlite3VdbeAddOp4(v, OP_String8, 0, 3, 0, zErr, P4_DYNAMIC); } + sqlite3VdbeResolveLabel(v, labelError); + integrityCheckResultRow(v); + sqlite3VdbeResolveLabel(v, labelOk); } /* Verify CHECK constraints */ if( pTab->pCheck && (db->flags & SQLITE_IgnoreChecks)==0 ){ @@ -138953,7 +145302,8 @@ SQLITE_PRIVATE void sqlite3Pragma( if( !isQuick ){ /* Omit the remaining tests for quick_check */ /* Validate index entries for the current row */ for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){ - int jmp2, jmp3, jmp4, jmp5; + int jmp2, jmp3, jmp4, jmp5, label6; + int kk; int ckUniq = sqlite3VdbeMakeLabel(pParse); if( pPk==pIdx ) continue; r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3, @@ -138971,13 +145321,49 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3); jmp4 = integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, jmp2); + + /* The OP_IdxRowid opcode is an optimized version of OP_Column + ** that extracts the rowid off the end of the index record. + ** But it only works correctly if index record does not have + ** any extra bytes at the end. Verify that this is the case. */ + if( HasRowid(pTab) ){ + int jmp7; + sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur+j, 3); + jmp7 = sqlite3VdbeAddOp3(v, OP_Eq, 3, 0, r1+pIdx->nColumn-1); + VdbeCoverageNeverNull(v); + sqlite3VdbeLoadString(v, 3, + "rowid not at end-of-record for row "); + sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); + sqlite3VdbeLoadString(v, 4, " of index "); + sqlite3VdbeGoto(v, jmp5-1); + sqlite3VdbeJumpHere(v, jmp7); + } + + /* Any indexed columns with non-BINARY collations must still hold + ** the exact same text value as the table. */ + label6 = 0; + for(kk=0; kknKeyCol; kk++){ + if( pIdx->azColl[kk]==sqlite3StrBINARY ) continue; + if( label6==0 ) label6 = sqlite3VdbeMakeLabel(pParse); + sqlite3VdbeAddOp3(v, OP_Column, iIdxCur+j, kk, 3); + sqlite3VdbeAddOp3(v, OP_Ne, 3, label6, r1+kk); VdbeCoverage(v); + } + if( label6 ){ + int jmp6 = sqlite3VdbeAddOp0(v, OP_Goto); + sqlite3VdbeResolveLabel(v, label6); + sqlite3VdbeLoadString(v, 3, "row "); + sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3); + sqlite3VdbeLoadString(v, 4, " values differ from index "); + sqlite3VdbeGoto(v, jmp5-1); + sqlite3VdbeJumpHere(v, jmp6); + } + /* For UNIQUE indexes, verify that only one entry exists with the ** current key. 
The entry is unique if (1) any column is NULL ** or (2) the next entry has a different key */ if( IsUniqueIndex(pIdx) ){ int uniqOk = sqlite3VdbeMakeLabel(pParse); int jmp6; - int kk; for(kk=0; kknKeyCol; kk++){ int iCol = pIdx->aiColumn[kk]; assert( iCol!=XN_ROWID && iColnCol ); @@ -139012,8 +145398,43 @@ SQLITE_PRIVATE void sqlite3Pragma( integrityCheckResultRow(v); sqlite3VdbeJumpHere(v, addr); } + if( pPk ){ + sqlite3ReleaseTempRange(pParse, r2, pPk->nKeyCol); + } } } + +#ifndef SQLITE_OMIT_VIRTUALTABLE + /* Second pass to invoke the xIntegrity method on all virtual + ** tables. + */ + for(x=sqliteHashFirst(pTbls); x; x=sqliteHashNext(x)){ + Table *pTab = sqliteHashData(x); + sqlite3_vtab *pVTab; + int a1; + if( pObjTab && pObjTab!=pTab ) continue; + if( IsOrdinaryTable(pTab) ) continue; + if( !IsVirtual(pTab) ) continue; + if( pTab->nCol<=0 ){ + const char *zMod = pTab->u.vtab.azArg[0]; + if( sqlite3HashFind(&db->aModule, zMod)==0 ) continue; + } + sqlite3ViewGetColumnNames(pParse, pTab); + if( pTab->u.vtab.p==0 ) continue; + pVTab = pTab->u.vtab.p->pVtab; + if( NEVER(pVTab==0) ) continue; + if( NEVER(pVTab->pModule==0) ) continue; + if( pVTab->pModule->iVersion<4 ) continue; + if( pVTab->pModule->xIntegrity==0 ) continue; + sqlite3VdbeAddOp3(v, OP_VCheck, i, 3, isQuick); + pTab->nTabRef++; + sqlite3VdbeAppendP4(v, pTab, P4_TABLEREF); + a1 = sqlite3VdbeAddOp1(v, OP_IsNull, 3); VdbeCoverage(v); + integrityCheckResultRow(v); + sqlite3VdbeJumpHere(v, a1); + continue; + } +#endif } { static const int iLn = VDBE_OFFSET_LINENO(2); @@ -139162,6 +145583,11 @@ SQLITE_PRIVATE void sqlite3Pragma( aOp[1].p2 = iCookie; aOp[1].p3 = sqlite3Atoi(zRight); aOp[1].p5 = 1; + if( iCookie==BTREE_SCHEMA_VERSION && (db->flags & SQLITE_Defensive)!=0 ){ + /* Do not allow the use of PRAGMA schema_version=VALUE in defensive + ** mode. Change the OP_SetCookie opcode into a no-op. */ + aOp[1].opcode = OP_Noop; + } }else{ /* Read the specified cookie value */ static const VdbeOpList readCookie[] = { @@ -139318,7 +145744,7 @@ SQLITE_PRIVATE void sqlite3Pragma( Schema *pSchema; /* The current schema */ Table *pTab; /* A table in the schema */ Index *pIdx; /* An index of the table */ - LogEst szThreshold; /* Size threshold above which reanalysis is needd */ + LogEst szThreshold; /* Size threshold above which reanalysis needed */ char *zSubSql; /* SQL statement for the OP_SqlExec opcode */ u32 opMask; /* Mask of operations to perform */ @@ -139532,6 +145958,12 @@ SQLITE_PRIVATE void sqlite3Pragma( sqlite3VdbeSetNumCols(v, 1); sqlite3VdbeSetColName(v, 0, COLNAME_NAME, "ok", SQLITE_STATIC); returnSingleText(v, "ok"); + } else { + sqlite3ErrorMsg(pParse, "An error occurred with PRAGMA key or rekey. " + "PRAGMA key requires a key of one or more characters. " + "PRAGMA rekey can only be run on an existing encrypted database. 
" + "Use sqlcipher_export() and ATTACH to convert encrypted/plaintext databases."); + goto pragma_out; } } break; @@ -139688,7 +146120,11 @@ static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ j = seen[0]-1; pIdxInfo->aConstraintUsage[j].argvIndex = 1; pIdxInfo->aConstraintUsage[j].omit = 1; - if( seen[1]==0 ) return SQLITE_OK; + if( seen[1]==0 ){ + pIdxInfo->estimatedCost = (double)1000; + pIdxInfo->estimatedRows = 1000; + return SQLITE_OK; + } pIdxInfo->estimatedCost = (double)20; pIdxInfo->estimatedRows = 20; j = seen[1]-1; @@ -139853,7 +146289,8 @@ static const sqlite3_module pragmaVtabModule = { 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; /* @@ -140185,7 +146622,14 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl #else encoding = SQLITE_UTF8; #endif - sqlite3SetTextEncoding(db, encoding); + if( db->nVdbeActive>0 && encoding!=ENC(db) + && (db->mDbFlags & DBFLAG_Vacuum)==0 + ){ + rc = SQLITE_LOCKED; + goto initone_error_out; + }else{ + sqlite3SetTextEncoding(db, encoding); + } }else{ /* If opening an attached database, the encoding much match ENC(db) */ if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){ @@ -140399,8 +146843,8 @@ static void schemaIsValid(Parse *pParse){ sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie); assert( sqlite3SchemaMutexHeld(db, iDb, 0) ); if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){ + if( DbHasProperty(db, iDb, DB_SchemaLoaded) ) pParse->rc = SQLITE_SCHEMA; sqlite3ResetOneSchema(db, iDb); - pParse->rc = SQLITE_SCHEMA; } /* Close the transaction, if one was opened. */ @@ -140453,15 +146897,15 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){ assert( db->pParse==pParse ); assert( pParse->nested==0 ); #ifndef SQLITE_OMIT_SHARED_CACHE - sqlite3DbFree(db, pParse->aTableLock); + if( pParse->aTableLock ) sqlite3DbNNFreeNN(db, pParse->aTableLock); #endif while( pParse->pCleanup ){ ParseCleanup *pCleanup = pParse->pCleanup; pParse->pCleanup = pCleanup->pNext; pCleanup->xCleanup(db, pCleanup->pPtr); - sqlite3DbFreeNN(db, pCleanup); + sqlite3DbNNFreeNN(db, pCleanup); } - sqlite3DbFree(db, pParse->aLabel); + if( pParse->aLabel ) sqlite3DbNNFreeNN(db, pParse->aLabel); if( pParse->pConstExpr ){ sqlite3ExprListDelete(db, pParse->pConstExpr); } @@ -140470,8 +146914,6 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){ db->lookaside.sz = db->lookaside.bDisable ? 0 : db->lookaside.szTrue; assert( pParse->db->pParse==pParse ); db->pParse = pParse->pOuterParse; - pParse->db = 0; - pParse->disableLookaside = 0; } /* @@ -140480,7 +146922,7 @@ SQLITE_PRIVATE void sqlite3ParseObjectReset(Parse *pParse){ ** immediately. ** ** Use this mechanism for uncommon cleanups. There is a higher setup -** cost for this mechansim (an extra malloc), so it should not be used +** cost for this mechanism (an extra malloc), so it should not be used ** for common cleanups that happen on most calls. But for less ** common cleanups, we save a single NULL-pointer comparison in ** sqlite3ParseObjectReset(), which reduces the total CPU cycle count. 
@@ -140572,9 +147014,18 @@ static int sqlite3Prepare( sParse.pOuterParse = db->pParse; db->pParse = &sParse; sParse.db = db; - sParse.pReprepare = pReprepare; + if( pReprepare ){ + sParse.pReprepare = pReprepare; + sParse.explain = sqlite3_stmt_isexplain((sqlite3_stmt*)pReprepare); + }else{ + assert( sParse.pReprepare==0 ); + } assert( ppStmt && *ppStmt==0 ); - if( db->mallocFailed ) sqlite3ErrorMsg(&sParse, "out of memory"); + if( db->mallocFailed ){ + sqlite3ErrorMsg(&sParse, "out of memory"); + db->errCode = rc = SQLITE_NOMEM; + goto end_prepare; + } assert( sqlite3_mutex_held(db->mutex) ); /* For a long-term use prepared statement avoid the use of @@ -140584,7 +147035,7 @@ static int sqlite3Prepare( sParse.disableLookaside++; DisableLookaside; } - sParse.disableVtab = (prepFlags & SQLITE_PREPARE_NO_VTAB)!=0; + sParse.prepFlags = prepFlags & 0xff; /* Check to verify that it is possible to get a read lock on all ** database schemas. The inability to get a read lock indicates that @@ -140625,7 +147076,9 @@ static int sqlite3Prepare( } } - sqlite3VtabUnlockList(db); +#ifndef SQLITE_OMIT_VIRTUALTABLE + if( db->pDisconnect ) sqlite3VtabUnlockList(db); +#endif if( nBytes>=0 && (nBytes==0 || zSql[nBytes-1]!=0) ){ char *zSqlCopy; @@ -140731,6 +147184,7 @@ static int sqlite3LockAndPrepare( assert( (rc&db->errMask)==rc ); db->busyHandler.nBusy = 0; sqlite3_mutex_leave(db->mutex); + assert( rc==SQLITE_OK || (*ppStmt)==0 ); return rc; } @@ -141009,6 +147463,10 @@ struct SortCtx { } aDefer[4]; #endif struct RowLoadInfo *pDeferredRowLoad; /* Deferred row loading info or NULL */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrPush; /* First instruction to push data into sorter */ + int addrPushEnd; /* Last instruction that pushes data into sorter */ +#endif }; #define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */ @@ -141020,6 +147478,7 @@ struct SortCtx { ** If bFree==0, Leave the first Select object unfreed */ static void clearSelect(sqlite3 *db, Select *p, int bFree){ + assert( db!=0 ); while( p ){ Select *pPrior = p->pPrior; sqlite3ExprListDelete(db, p->pEList); @@ -141039,7 +147498,7 @@ static void clearSelect(sqlite3 *db, Select *p, int bFree){ sqlite3WindowUnlinkFromSelect(p->pWin); } #endif - if( bFree ) sqlite3DbFreeNN(db, p); + if( bFree ) sqlite3DbNNFreeNN(db, p); p = pPrior; bFree = 1; } @@ -141123,6 +147582,9 @@ SQLITE_PRIVATE Select *sqlite3SelectNew( SQLITE_PRIVATE void sqlite3SelectDelete(sqlite3 *db, Select *p){ if( OK_IF_ALWAYS_TRUE(p) ) clearSelect(db, p, 1); } +SQLITE_PRIVATE void sqlite3SelectDeleteGeneric(sqlite3 *db, void *p){ + if( ALWAYS(p) ) clearSelect(db, (Select*)p, 1); +} /* ** Return a pointer to the right-most SELECT statement in a compound. @@ -141171,7 +147633,7 @@ static Select *findRightmost(Select *p){ ** NATURAL FULL OUTER JT_NATRUAL|JT_LEFT|JT_RIGHT ** ** To preserve historical compatibly, SQLite also accepts a variety -** of other non-standard and in many cases non-sensical join types. +** of other non-standard and in many cases nonsensical join types. ** This routine makes as much sense at it can from the nonsense join ** type and returns a result. 
Examples of accepted nonsense join types ** include but are not limited to: @@ -141393,6 +147855,7 @@ static void unsetJoinExpr(Expr *p, int iTable, int nullable){ } if( p->op==TK_FUNCTION ){ assert( ExprUseXList(p) ); + assert( p->pLeft==0 ); if( p->x.pList ){ int i; for(i=0; ix.pList->nExpr; i++){ @@ -141442,7 +147905,7 @@ static int sqlite3ProcessJoin(Parse *pParse, Select *p){ if( NEVER(pLeft->pTab==0 || pRightTab==0) ) continue; joinType = (pRight->fg.jointype & JT_OUTER)!=0 ? EP_OuterON : EP_InnerON; - /* If this is a NATURAL join, synthesize an approprate USING clause + /* If this is a NATURAL join, synthesize an appropriate USING clause ** to specify which columns should be joined. */ if( pRight->fg.jointype & JT_NATURAL ){ @@ -141656,14 +148119,18 @@ static void pushOntoSorter( ** (2) All output columns are included in the sort record. In that ** case regData==regOrigData. ** (3) Some output columns are omitted from the sort record due to - ** the SQLITE_ENABLE_SORTER_REFERENCE optimization, or due to the + ** the SQLITE_ENABLE_SORTER_REFERENCES optimization, or due to the ** SQLITE_ECEL_OMITREF optimization, or due to the - ** SortCtx.pDeferredRowLoad optimiation. In any of these cases + ** SortCtx.pDeferredRowLoad optimization. In any of these cases ** regOrigData is 0 to prevent this routine from trying to copy ** values that might not yet exist. */ assert( nData==1 || regData==regOrigData || regOrigData==0 ); +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pSort->addrPush = sqlite3VdbeCurrentAddr(v); +#endif + if( nPrefixReg ){ assert( nPrefixReg==nExpr+bSeq ); regBase = regData - nPrefixReg; @@ -141710,7 +148177,7 @@ static void pushOntoSorter( testcase( pKI->nAllField > pKI->nKeyField+2 ); pOp->p4.pKeyInfo = sqlite3KeyInfoFromExprList(pParse,pSort->pOrderBy,nOBSat, pKI->nAllField-pKI->nKeyField-1); - pOp = 0; /* Ensure pOp not used after sqltie3VdbeAddOp3() */ + pOp = 0; /* Ensure pOp not used after sqlite3VdbeAddOp3() */ addrJmp = sqlite3VdbeCurrentAddr(v); sqlite3VdbeAddOp3(v, OP_Jump, addrJmp+1, 0, addrJmp+1); VdbeCoverage(v); pSort->labelBkOut = sqlite3VdbeMakeLabel(pParse); @@ -141764,6 +148231,9 @@ static void pushOntoSorter( sqlite3VdbeChangeP2(v, iSkip, pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v)); } +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + pSort->addrPushEnd = sqlite3VdbeCurrentAddr(v)-1; +#endif } /* @@ -141801,7 +148271,7 @@ static void codeOffset( ** The returned value in this case is a copy of parameter iTab. ** ** WHERE_DISTINCT_ORDERED: -** In this case rows are being delivered sorted order. The ephermal +** In this case rows are being delivered sorted order. The ephemeral ** table is not required. Instead, the current set of values ** is compared against previous row. If they match, the new row ** is not distinct and control jumps to VM address addrRepeat. Otherwise, @@ -142230,6 +148700,16 @@ static void selectInnerLoop( testcase( eDest==SRT_Fifo ); testcase( eDest==SRT_DistFifo ); sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg); +#if !defined(SQLITE_ENABLE_NULL_TRIM) && defined(SQLITE_DEBUG) + /* A destination of SRT_Table and a non-zero iSDParm2 parameter means + ** that this is an "UPDATE ... FROM" on a virtual table or view. In this + ** case set the p5 parameter of the OP_MakeRecord to OPFLAG_NOCHNG_MAGIC. + ** This does not affect operation in any way - it just allows MakeRecord + ** to process OPFLAG_NOCHANGE values without an assert() failing. 
*/ + if( eDest==SRT_Table && pDest->iSDParm2 ){ + sqlite3VdbeChangeP5(v, OPFLAG_NOCHNG_MAGIC); + } +#endif #ifndef SQLITE_OMIT_CTE if( eDest==SRT_DistFifo ){ /* If the destination is DistFifo, then cursor (iParm+1) is open @@ -142445,9 +148925,10 @@ SQLITE_PRIVATE KeyInfo *sqlite3KeyInfoAlloc(sqlite3 *db, int N, int X){ */ SQLITE_PRIVATE void sqlite3KeyInfoUnref(KeyInfo *p){ if( p ){ + assert( p->db!=0 ); assert( p->nRef>0 ); p->nRef--; - if( p->nRef==0 ) sqlite3DbFreeNN(p->db, p); + if( p->nRef==0 ) sqlite3DbNNFreeNN(p->db, p); } } @@ -142586,6 +149067,16 @@ static void generateSortTail( int bSeq; /* True if sorter record includes seq. no. */ int nRefKey = 0; struct ExprList_item *aOutEx = p->pEList->a; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; /* Address of OP_Explain instruction */ +#endif + + ExplainQueryPlan2(addrExplain, (pParse, 0, + "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"") + ); + sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd); + sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush); + assert( addrBreak<0 ); if( pSort->labelBkOut ){ @@ -142698,6 +149189,7 @@ static void generateSortTail( VdbeComment((v, "%s", aOutEx[i].zEName)); } } + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); switch( eDest ){ case SRT_Table: case SRT_EphemTab: { @@ -142759,6 +149251,7 @@ static void generateSortTail( }else{ sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v); } + sqlite3VdbeScanStatusRange(v, addrExplain, sqlite3VdbeCurrentAddr(v)-1, -1); if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn); sqlite3VdbeResolveLabel(v, addrBreak); } @@ -142767,9 +149260,6 @@ static void generateSortTail( ** Return a pointer to a string containing the 'declaration type' of the ** expression pExpr. The string may be treated as static by the caller. ** -** Also try to estimate the size of the returned value and return that -** result in *pEstWidth. -** ** The declaration type is the exact datatype definition extracted from the ** original CREATE TABLE statement if the expression is a column. The ** declaration type for a ROWID field is INTEGER. Exactly when an expression @@ -142861,11 +149351,7 @@ static const char *columnTypeImpl( ** data for the result-set column of the sub-select. */ if( iColpEList->nExpr -#ifdef SQLITE_ALLOW_ROWID_IN_VIEW - && iCol>=0 -#else - && ALWAYS(iCol>=0) -#endif + && (!ViewCanHaveRowid || iCol>=0) ){ /* If iCol is less than zero, then the expression requests the ** rowid of the sub-select or view. 
This expression is legal (see @@ -143023,17 +149509,10 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames( int fullName; /* TABLE.COLUMN if no AS clause and is a direct table ref */ int srcName; /* COLUMN or TABLE.COLUMN if no AS clause and is direct */ -#ifndef SQLITE_OMIT_EXPLAIN - /* If this is an EXPLAIN, skip this step */ - if( pParse->explain ){ - return; - } -#endif - if( pParse->colNamesSet ) return; /* Column names are determined by the left-most term of a compound select */ while( pSelect->pPrior ) pSelect = pSelect->pPrior; - SELECTTRACE(1,pParse,pSelect,("generating column names\n")); + TREETRACE(0x80,pParse,pSelect,("generating column names\n")); pTabList = pSelect->pSrc; pEList = pSelect->pEList; assert( v!=0 ); @@ -143133,7 +149612,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( *pnCol = nCol; *paCol = aCol; - for(i=0, pCol=aCol; imallocFailed; i++, pCol++){ + for(i=0, pCol=aCol; inErr; i++, pCol++){ struct ExprList_item *pX = &pEList->a[i]; struct ExprList_item *pCollide; /* Get an appropriate name for the column @@ -143183,7 +149662,10 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( if( zName[j]==':' ) nName = j; } zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt); - if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt); + sqlite3ProgressCheck(pParse); + if( cnt>3 ){ + sqlite3_randomness(sizeof(cnt), &cnt); + } } pCol->zCnName = zName; pCol->hName = sqlite3StrIHash(zName); @@ -143196,71 +149678,105 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList( } } sqlite3HashClear(&ht); - if( db->mallocFailed ){ + if( pParse->nErr ){ for(j=0; jrc; } return SQLITE_OK; } /* -** Add type and collation information to a column list based on -** a SELECT statement. -** -** The column list presumably came from selectColumnNamesFromExprList(). -** The column list has only names, not types or collations. This -** routine goes through and adds the types and collations. +** pTab is a transient Table object that represents a subquery of some +** kind (maybe a parenthesized subquery in the FROM clause of a larger +** query, or a VIEW, or a CTE). This routine computes type information +** for that Table object based on the Select object that implements the +** subquery. For the purposes of this routine, "type information" means: ** -** This routine requires that all identifiers in the SELECT -** statement be resolved. +** * The datatype name, as it might appear in a CREATE TABLE statement +** * Which collating sequence to use for the column +** * The affinity of the column */ -SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation( - Parse *pParse, /* Parsing contexts */ - Table *pTab, /* Add column type information to this table */ - Select *pSelect, /* SELECT used to determine types and collations */ - char aff /* Default affinity for columns */ +SQLITE_PRIVATE void sqlite3SubqueryColumnTypes( + Parse *pParse, /* Parsing contexts */ + Table *pTab, /* Add column type information to this table */ + Select *pSelect, /* SELECT used to determine types and collations */ + char aff /* Default affinity. 
*/ ){ sqlite3 *db = pParse->db; - NameContext sNC; Column *pCol; CollSeq *pColl; - int i; + int i,j; Expr *p; struct ExprList_item *a; + NameContext sNC; assert( pSelect!=0 ); - assert( (pSelect->selFlags & SF_Resolved)!=0 ); - assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed ); - if( db->mallocFailed ) return; + testcase( (pSelect->selFlags & SF_Resolved)==0 ); + assert( (pSelect->selFlags & SF_Resolved)!=0 || IN_RENAME_OBJECT ); + assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 ); + assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB ); + if( db->mallocFailed || IN_RENAME_OBJECT ) return; + while( pSelect->pPrior ) pSelect = pSelect->pPrior; + a = pSelect->pEList->a; memset(&sNC, 0, sizeof(sNC)); sNC.pSrcList = pSelect->pSrc; - a = pSelect->pEList->a; for(i=0, pCol=pTab->aCol; inCol; i++, pCol++){ const char *zType; - i64 n, m; + i64 n; pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT); p = a[i].pExpr; - zType = columnType(&sNC, p, 0, 0, 0); /* pCol->szEst = ... // Column size est for SELECT tables never used */ pCol->affinity = sqlite3ExprAffinity(p); + if( pCol->affinity<=SQLITE_AFF_NONE ){ + pCol->affinity = aff; + } + if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){ + int m = 0; + Select *pS2; + for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){ + m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr); + } + if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){ + pCol->affinity = SQLITE_AFF_BLOB; + }else + if( pCol->affinity>=SQLITE_AFF_NUMERIC && (m&0x02)!=0 ){ + pCol->affinity = SQLITE_AFF_BLOB; + } + if( pCol->affinity>=SQLITE_AFF_NUMERIC && p->op==TK_CAST ){ + pCol->affinity = SQLITE_AFF_FLEXNUM; + } + } + zType = columnType(&sNC, p, 0, 0, 0); + if( zType==0 || pCol->affinity!=sqlite3AffinityType(zType, 0) ){ + if( pCol->affinity==SQLITE_AFF_NUMERIC + || pCol->affinity==SQLITE_AFF_FLEXNUM + ){ + zType = "NUM"; + }else{ + zType = 0; + for(j=1; jaffinity ){ + zType = sqlite3StdType[j]; + break; + } + } + } + } if( zType ){ - m = sqlite3Strlen30(zType); + i64 m = sqlite3Strlen30(zType); n = sqlite3Strlen30(pCol->zCnName); pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2); + pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); if( pCol->zCnName ){ memcpy(&pCol->zCnName[n+1], zType, m+1); pCol->colFlags |= COLFLAG_HASTYPE; - }else{ - testcase( pCol->colFlags & COLFLAG_HASTYPE ); - pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL); } } - if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff; pColl = sqlite3ExprCollSeq(pParse, p); if( pColl ){ assert( pTab->pIndex==0 ); @@ -143294,7 +149810,7 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect, c pTab->zName = 0; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol); - sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect, aff); + sqlite3SubqueryColumnTypes(pParse, pTab, pSelect, aff); pTab->iPKey = -1; if( db->mallocFailed ){ sqlite3DeleteTable(db, pTab); @@ -143509,7 +150025,7 @@ static void generateWithRecursiveQuery( int iQueue; /* The Queue table */ int iDistinct = 0; /* To ensure unique results if UNION */ int eDest = SRT_Fifo; /* How to write to Queue */ - SelectDest destQueue; /* SelectDest targetting the Queue table */ + SelectDest destQueue; /* SelectDest targeting the Queue table */ int i; /* Loop counter */ int rc; /* Result code */ ExprList *pOrderBy; /* The ORDER BY clause */ @@ -143819,7 +150335,7 @@ static int multiSelect( pPrior->iLimit = 
p->iLimit; pPrior->iOffset = p->iOffset; pPrior->pLimit = p->pLimit; - SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL left...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL left...\n")); rc = sqlite3Select(pParse, pPrior, &dest); pPrior->pLimit = 0; if( rc ){ @@ -143837,7 +150353,7 @@ static int multiSelect( } } ExplainQueryPlan((pParse, 1, "UNION ALL")); - SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL right...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL right...\n")); rc = sqlite3Select(pParse, p, &dest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; @@ -143890,7 +150406,7 @@ static int multiSelect( */ assert( !pPrior->pOrderBy ); sqlite3SelectDestInit(&uniondest, priorOp, unionTab); - SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION left...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION left...\n")); rc = sqlite3Select(pParse, pPrior, &uniondest); if( rc ){ goto multi_select_end; @@ -143910,7 +150426,7 @@ static int multiSelect( uniondest.eDest = op; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", sqlite3SelectOpName(p->op))); - SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION right...\n")); + TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION right...\n")); rc = sqlite3Select(pParse, p, &uniondest); testcase( rc!=SQLITE_OK ); assert( p->pOrderBy==0 ); @@ -143971,7 +150487,7 @@ static int multiSelect( /* Code the SELECTs to our left into temporary table "tab1". */ sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1); - SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT left...\n")); + TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT left...\n")); rc = sqlite3Select(pParse, pPrior, &intersectdest); if( rc ){ goto multi_select_end; @@ -143988,7 +150504,7 @@ static int multiSelect( intersectdest.iSDParm = tab2; ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE", sqlite3SelectOpName(p->op))); - SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT right...\n")); + TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT right...\n")); rc = sqlite3Select(pParse, p, &intersectdest); testcase( rc!=SQLITE_OK ); pDelete = p->pPrior; @@ -144085,9 +150601,7 @@ static int multiSelect( pDest->iSdst = dest.iSdst; pDest->nSdst = dest.nSdst; if( pDelete ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, - pDelete); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pDelete); } return rc; } @@ -144109,7 +150623,7 @@ SQLITE_PRIVATE void sqlite3SelectWrongNumTermsError(Parse *pParse, Select *p){ /* ** Code an output subroutine for a coroutine implementation of a -** SELECT statment. +** SELECT statement. ** ** The data to be output is contained in pIn->iSdst. There are ** pIn->nSdst columns to be output. pDest is where the output should @@ -144331,7 +150845,7 @@ static int generateOutputSubroutine( ** ** We call AltB, AeqB, AgtB, EofA, and EofB "subroutines" but they are not ** actually called using Gosub and they do not Return. EofA and EofB loop -** until all data is exhausted then jump to the "end" labe. AltB, AeqB, +** until all data is exhausted then jump to the "end" label. AltB, AeqB, ** and AgtB jump to either L2 or to one of EofA or EofB. 
*/ #ifndef SQLITE_OMIT_COMPOUND_SELECT @@ -144368,7 +150882,7 @@ static int multiSelectOrderBy( int savedOffset; /* Saved value of p->iOffset */ int labelCmpr; /* Label for the start of the merge algorithm */ int labelEnd; /* Label for the end of the overall SELECT stmt */ - int addr1; /* Jump instructions that get retargetted */ + int addr1; /* Jump instructions that get retargeted */ int op; /* One of TK_ALL, TK_UNION, TK_EXCEPT, TK_INTERSECT */ KeyInfo *pKeyDup = 0; /* Comparison information for duplicate removal */ KeyInfo *pKeyMerge; /* Comparison information for merging rows */ @@ -144635,11 +151149,10 @@ static int multiSelectOrderBy( */ sqlite3VdbeResolveLabel(v, labelEnd); - /* Reassemble the compound query so that it will be freed correctly - ** by the calling function */ + /* Make arrangements to free the 2nd and subsequent arms of the compound + ** after the parse has finished */ if( pSplit->pPrior ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3SelectDelete, pSplit->pPrior); + sqlite3ParserAddCleanup(pParse, sqlite3SelectDeleteGeneric, pSplit->pPrior); } pSplit->pPrior = pPrior; pPrior->pNext = pSplit; @@ -144669,7 +151182,7 @@ static int multiSelectOrderBy( ** the left operands of a RIGHT JOIN. In either case, we need to potentially ** bypass the substituted expression with OP_IfNullRow. ** -** Suppose the original expression integer constant. Even though the table +** Suppose the original expression is an integer constant. Even though the table ** has the nullRow flag set, because the expression is an integer constant, ** it will not be NULLed out. So instead, we insert an OP_IfNullRow opcode ** that checks to see if the nullRow flag is set on the table. If the nullRow @@ -144695,6 +151208,7 @@ typedef struct SubstContext { int iNewTable; /* New table number */ int isOuterJoin; /* Add TK_IF_NULL_ROW opcodes on each replacement */ ExprList *pEList; /* Replacement expressions */ + ExprList *pCList; /* Collation sequences for replacement expr */ } SubstContext; /* Forward Declarations */ @@ -144736,19 +151250,26 @@ static Expr *substExpr( #endif { Expr *pNew; - Expr *pCopy = pSubst->pEList->a[pExpr->iColumn].pExpr; + int iColumn; + Expr *pCopy; Expr ifNullRow; - assert( pSubst->pEList!=0 && pExpr->iColumnpEList->nExpr ); + iColumn = pExpr->iColumn; + assert( iColumn>=0 ); + assert( pSubst->pEList!=0 && iColumnpEList->nExpr ); assert( pExpr->pRight==0 ); + pCopy = pSubst->pEList->a[iColumn].pExpr; if( sqlite3ExprIsVector(pCopy) ){ sqlite3VectorErrorMsg(pSubst->pParse, pCopy); }else{ sqlite3 *db = pSubst->pParse->db; - if( pSubst->isOuterJoin && pCopy->op!=TK_COLUMN ){ + if( pSubst->isOuterJoin + && (pCopy->op!=TK_COLUMN || pCopy->iTable!=pSubst->iNewTable) + ){ memset(&ifNullRow, 0, sizeof(ifNullRow)); ifNullRow.op = TK_IF_NULL_ROW; ifNullRow.pLeft = pCopy; ifNullRow.iTable = pSubst->iNewTable; + ifNullRow.iColumn = -99; ifNullRow.flags = EP_IfNullRow; pCopy = &ifNullRow; } @@ -144775,11 +151296,16 @@ static Expr *substExpr( /* Ensure that the expression now has an implicit collation sequence, ** just as it did when it was a column of a view or sub-query. */ - if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE ){ - CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, pExpr); - pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, - (pColl ? 
pColl->zName : "BINARY") + { + CollSeq *pNat = sqlite3ExprCollSeq(pSubst->pParse, pExpr); + CollSeq *pColl = sqlite3ExprCollSeq(pSubst->pParse, + pSubst->pCList->a[iColumn].pExpr ); + if( pNat!=pColl || (pExpr->op!=TK_COLUMN && pExpr->op!=TK_COLLATE) ){ + pExpr = sqlite3ExprAddCollateString(pSubst->pParse, pExpr, + (pColl ? pColl->zName : "BINARY") + ); + } } ExprClearProperty(pExpr, EP_Collate); } @@ -144972,6 +151498,46 @@ static void renumberCursors( } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ +/* +** If pSel is not part of a compound SELECT, return a pointer to its +** expression list. Otherwise, return a pointer to the expression list +** of the leftmost SELECT in the compound. +*/ +static ExprList *findLeftmostExprlist(Select *pSel){ + while( pSel->pPrior ){ + pSel = pSel->pPrior; + } + return pSel->pEList; +} + +/* +** Return true if any of the result-set columns in the compound query +** have incompatible affinities on one or more arms of the compound. +*/ +static int compoundHasDifferentAffinities(Select *p){ + int ii; + ExprList *pList; + assert( p!=0 ); + assert( p->pEList!=0 ); + assert( p->pPrior!=0 ); + pList = p->pEList; + for(ii=0; iinExpr; ii++){ + char aff; + Select *pSub1; + assert( pList->a[ii].pExpr!=0 ); + aff = sqlite3ExprAffinity(pList->a[ii].pExpr); + for(pSub1=p->pPrior; pSub1; pSub1=pSub1->pPrior){ + assert( pSub1->pEList!=0 ); + assert( pSub1->pEList->nExpr>ii ); + assert( pSub1->pEList->a[ii].pExpr!=0 ); + if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){ + return 1; + } + } + } + return 0; +} + #if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) /* ** This routine attempts to flatten subqueries as a performance optimization. @@ -145016,7 +151582,8 @@ static void renumberCursors( ** (3a) the subquery may not be a join and ** (3b) the FROM clause of the subquery may not contain a virtual ** table and -** (3c) the outer query may not be an aggregate. +** (**) Was: "The outer query may not have a GROUP BY." This case +** is now managed correctly ** (3d) the outer query may not be DISTINCT. ** See also (26) for restrictions on RIGHT JOIN. ** @@ -145039,7 +151606,7 @@ static void renumberCursors( ** (9) If the subquery uses LIMIT then the outer query may not be aggregate. ** ** (**) Restriction (10) was removed from the code on 2005-02-05 but we -** accidently carried the comment forward until 2014-09-15. Original +** accidentally carried the comment forward until 2014-09-15. Original ** constraint: "If the subquery is aggregate then the outer query ** may not use LIMIT." ** @@ -145073,6 +151640,8 @@ static void renumberCursors( ** (17g) either the subquery is the first element of the outer ** query or there are no RIGHT or FULL JOINs in any arm ** of the subquery. (This is a duplicate of condition (27b).) +** (17h) The corresponding result set expressions in all arms of the +** compound must have the same affinity. ** ** The parent and sub-query may contain WHERE clauses. Subject to ** rules (11), (13) and (14), they may also contain ORDER BY, @@ -145124,18 +151693,13 @@ static void renumberCursors( ** See also (3) for restrictions on LEFT JOIN. ** ** (27) The subquery may not contain a FULL or RIGHT JOIN unless it -** is the first element of the parent query. This must be the -** the case if: -** (27a) the subquery is not compound query, and +** is the first element of the parent query. Two subcases: +** (27a) the subquery is not a compound query. 
** (27b) the subquery is a compound query and the RIGHT JOIN occurs ** in any arm of the compound query. (See also (17g).) ** -** (28) The subquery is not a MATERIALIZED CTE. -** -** (29) Either the subquery is not the right-hand operand of a join with an -** ON or USING clause nor the right-hand operand of a NATURAL JOIN, or -** the right-most table within the FROM clause of the subquery -** is not part of an outer join. +** (28) The subquery is not a MATERIALIZED CTE. (This is handled +** in the caller before ever reaching this routine.) ** ** ** In this routine, the "p" parameter is a pointer to the outer query. @@ -145228,16 +151792,10 @@ static int flattenSubquery( ** ** which is not at all the same thing. ** - ** If the subquery is the right operand of a LEFT JOIN, then the outer - ** query cannot be an aggregate. (3c) This is an artifact of the way - ** aggregates are processed - there is no mechanism to determine if - ** the LEFT JOIN table should be all-NULL. - ** ** See also tickets #306, #350, and #3300. */ if( (pSubitem->fg.jointype & (JT_OUTER|JT_LTORJ))!=0 ){ if( pSubSrc->nSrc>1 /* (3a) */ - || isAgg /* (3c) */ || IsVirtual(pSubSrc->a[0].pTab) /* (3b) */ || (p->selFlags & SF_Distinct)!=0 /* (3d) */ || (pSubitem->fg.jointype & JT_RIGHT)!=0 /* (26) */ @@ -145246,52 +151804,14 @@ static int flattenSubquery( } isOuterJoin = 1; } -#ifdef SQLITE_EXTRA_IFNULLROW - else if( iFrom>0 && !isAgg ){ - /* Setting isOuterJoin to -1 causes OP_IfNullRow opcodes to be generated for - ** every reference to any result column from subquery in a join, even - ** though they are not necessary. This will stress-test the OP_IfNullRow - ** opcode. */ - isOuterJoin = -1; - } -#endif assert( pSubSrc->nSrc>0 ); /* True by restriction (7) */ if( iFrom>0 && (pSubSrc->a[0].fg.jointype & JT_LTORJ)!=0 ){ return 0; /* Restriction (27a) */ } - if( pSubitem->fg.isCte && pSubitem->u2.pCteUse->eM10d==M10d_Yes ){ - return 0; /* (28) */ - } - /* Restriction (29): - ** - ** We do not want two constraints on the same term of the flattened - ** query where one constraint has EP_InnerON and the other is EP_OuterON. - ** To prevent this, one or the other of the following conditions must be - ** false: - ** - ** (29a) The right-most entry in the FROM clause of the subquery - ** must not be part of an outer join. - ** - ** (29b) The subquery itself must not be the right operand of a - ** NATURAL join or a join that as an ON or USING clause. - ** - ** These conditions are sufficient to keep an EP_OuterON from being - ** flattened into an EP_InnerON. Restrictions (3a) and (27a) prevent - ** an EP_InnerON from being flattened into an EP_OuterON. - */ - if( pSubSrc->nSrc>=2 - && (pSubSrc->a[pSubSrc->nSrc-1].fg.jointype & JT_OUTER)!=0 - ){ - if( (pSubitem->fg.jointype & JT_NATURAL)!=0 - || pSubitem->fg.isUsing - || NEVER(pSubitem->u3.pOn!=0) /* ON clause already shifted into WHERE */ - || pSubitem->fg.isOn - ){ - return 0; - } - } + /* Condition (28) is blocked by the caller */ + assert( !pSubitem->fg.isCte || pSubitem->u2.pCteUse->eM10d!=M10d_Yes ); /* Restriction (17): If the sub-query is a compound SELECT, then it must ** use only the UNION ALL operator. And none of the simple select queries @@ -145299,6 +151819,7 @@ static int flattenSubquery( ** queries. */ if( pSub->pPrior ){ + int ii; if( pSub->pOrderBy ){ return 0; /* Restriction (20) */ } @@ -145331,7 +151852,6 @@ static int flattenSubquery( /* Restriction (18). 
*/ if( p->pOrderBy ){ - int ii; for(ii=0; iipOrderBy->nExpr; ii++){ if( p->pOrderBy->a[ii].u.x.iOrderByCol==0 ) return 0; } @@ -145340,6 +151860,9 @@ static int flattenSubquery( /* Restriction (23) */ if( (p->selFlags & SF_Recursive) ) return 0; + /* Restriction (17h) */ + if( compoundHasDifferentAffinities(pSub) ) return 0; + if( pSrc->nSrc>1 ){ if( pParse->nSelect>500 ) return 0; if( OptimizationDisabled(db, SQLITE_FlttnUnionAll) ) return 0; @@ -145349,7 +151872,7 @@ static int flattenSubquery( } /***** If we reach this point, flattening is permitted. *****/ - SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n", + TREETRACE(0x4,pParse,p,("flatten %u.%p from term %d\n", pSub->selId, pSub, iFrom)); /* Authorize the subquery */ @@ -145358,7 +151881,7 @@ static int flattenSubquery( testcase( i==SQLITE_DENY ); pParse->zAuthContext = zSavedAuthContext; - /* Delete the transient structures associated with thesubquery */ + /* Delete the transient structures associated with the subquery */ pSub1 = pSubitem->pSelect; sqlite3DbFree(db, pSubitem->zDatabase); sqlite3DbFree(db, pSubitem->zName); @@ -145428,7 +151951,7 @@ static int flattenSubquery( if( pPrior ) pPrior->pNext = pNew; pNew->pNext = p; p->pPrior = pNew; - SELECTTRACE(2,pParse,p,("compound-subquery flattener" + TREETRACE(0x4,pParse,p,("compound-subquery flattener" " creates %u as peer\n",pNew->selId)); } assert( pSubitem->pSelect==0 ); @@ -145450,9 +151973,7 @@ static int flattenSubquery( Table *pTabToDel = pSubitem->pTab; if( pTabToDel->nTabRef==1 ){ Parse *pToplevel = sqlite3ParseToplevel(pParse); - sqlite3ParserAddCleanup(pToplevel, - (void(*)(sqlite3*,void*))sqlite3DeleteTable, - pTabToDel); + sqlite3ParserAddCleanup(pToplevel, sqlite3DeleteTableGeneric, pTabToDel); testcase( pToplevel->earlyCleanup ); }else{ pTabToDel->nTabRef--; @@ -145540,7 +152061,7 @@ static int flattenSubquery( ** ORDER BY column expression is identical to the iOrderByCol'th ** expression returned by SELECT statement pSub. Since these values ** do not necessarily correspond to columns in SELECT statement pParent, - ** zero them before transfering the ORDER BY clause. + ** zero them before transferring the ORDER BY clause. ** ** Not doing this may cause an error if a subsequent call to this ** function attempts to flatten a compound sub-query into pParent @@ -145573,6 +152094,7 @@ static int flattenSubquery( x.iNewTable = iNewParent; x.isOuterJoin = isOuterJoin; x.pEList = pSub->pEList; + x.pCList = findLeftmostExprlist(pSub); substSelect(&x, pParent, 0); } @@ -145592,23 +152114,22 @@ static int flattenSubquery( pSub->pLimit = 0; } - /* Recompute the SrcList_item.colUsed masks for the flattened + /* Recompute the SrcItem.colUsed masks for the flattened ** tables. */ for(i=0; ia[i+iFrom]); } } - /* Finially, delete what is left of the subquery and return - ** success. + /* Finally, delete what is left of the subquery and return success. */ sqlite3AggInfoPersistWalkerInit(&w, pParse); sqlite3WalkSelect(&w,pSub1); sqlite3SelectDelete(db, pSub1); #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After flattening:\n")); + if( sqlite3TreeTrace & 0x4 ){ + TREETRACE(0x4,pParse,p,("After flattening:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -145635,7 +152156,7 @@ struct WhereConst { /* ** Add a new entry to the pConst object. Except, do not add duplicate -** pColumn entires. Also, do not add if doing so would not be appropriate. +** pColumn entries. Also, do not add if doing so would not be appropriate. 
** ** The caller guarantees the pColumn is a column and pValue is a constant. ** This routine has to do some additional checks before completing the @@ -145821,7 +152342,7 @@ static int propagateConstantExprRewrite(Walker *pWalker, Expr *pExpr){ ** SELECT * FROM t1 WHERE a=123 AND b=123; ** ** The two SELECT statements above should return different answers. b=a -** is alway true because the comparison uses numeric affinity, but b=123 +** is always true because the comparison uses numeric affinity, but b=123 ** is false because it uses text affinity and '0123' is not the same as '123'. ** To work around this, the expression tree is not actually changed from ** "b=a" to "b=123" but rather the "a" in "b=a" is tagged with EP_FixedCol @@ -145905,7 +152426,7 @@ static int propagateConstants( ** At the time this function is called it is guaranteed that ** ** * the sub-query uses only one distinct window frame, and -** * that the window frame has a PARTITION BY clase. +** * that the window frame has a PARTITION BY clause. */ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ assert( pSubq->pWin->pPartition ); @@ -145982,6 +152503,33 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){ ** be materialized. (This restriction is implemented in the calling ** routine.) ** +** (8) If the subquery is a compound that uses UNION, INTERSECT, +** or EXCEPT, then all of the result set columns for all arms of +** the compound must use the BINARY collating sequence. +** +** (9) All three of the following are true: +** +** (9a) The WHERE clause expression originates in the ON or USING clause +** of a join (either an INNER or an OUTER join), and +** +** (9b) The subquery is to the right of the ON/USING clause +** +** (9c) There is a RIGHT JOIN (or FULL JOIN) in between the ON/USING +** clause and the subquery. +** +** Without this restriction, the push-down optimization might move +** the ON/USING filter expression from the left side of a RIGHT JOIN +** over to the right side, which leads to incorrect answers. See +** also restriction (6) in sqlite3ExprIsSingleTableConstraint(). +** +** (10) The inner query is not the right-hand table of a RIGHT JOIN. +** +** (11) The subquery is not a VALUES clause +** +** (12) The WHERE clause is not "rowid ISNULL" or the equivalent. This +** case only comes up if SQLite is compiled using +** SQLITE_ALLOW_ROWID_IN_VIEW. +** ** Return 0 if no changes are made and non-zero if one or more WHERE clause ** terms are duplicated into the subquery. 
*/ @@ -145989,24 +152537,56 @@ static int pushDownWhereTerms( Parse *pParse, /* Parse context (for malloc() and error reporting) */ Select *pSubq, /* The subquery whose WHERE clause is to be augmented */ Expr *pWhere, /* The WHERE clause of the outer query */ - SrcItem *pSrc /* The subquery term of the outer FROM clause */ + SrcList *pSrcList, /* The complete from clause of the outer query */ + int iSrc /* Which FROM clause term to try to push into */ ){ Expr *pNew; + SrcItem *pSrc; /* The subquery FROM term into which WHERE is pushed */ int nChng = 0; + pSrc = &pSrcList->a[iSrc]; if( pWhere==0 ) return 0; - if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0; - if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ) return 0; + if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ){ + return 0; /* restrictions (2) and (11) */ + } + if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ){ + return 0; /* restrictions (10) */ + } -#ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pPrior ){ Select *pSel; + int notUnionAll = 0; for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + u8 op = pSel->op; + assert( op==TK_ALL || op==TK_SELECT + || op==TK_UNION || op==TK_INTERSECT || op==TK_EXCEPT ); + if( op!=TK_ALL && op!=TK_SELECT ){ + notUnionAll = 1; + } +#ifndef SQLITE_OMIT_WINDOWFUNC if( pSel->pWin ) return 0; /* restriction (6b) */ +#endif + } + if( notUnionAll ){ + /* If any of the compound arms are connected using UNION, INTERSECT, + ** or EXCEPT, then we must ensure that none of the columns use a + ** non-BINARY collating sequence. */ + for(pSel=pSubq; pSel; pSel=pSel->pPrior){ + int ii; + const ExprList *pList = pSel->pEList; + assert( pList!=0 ); + for(ii=0; iinExpr; ii++){ + CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[ii].pExpr); + if( !sqlite3IsBinary(pColl) ){ + return 0; /* Restriction (8) */ + } + } + } } }else{ +#ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0; - } #endif + } #ifdef SQLITE_DEBUG /* Only the first term of a compound can have a WITH clause. But make @@ -146025,11 +152605,28 @@ static int pushDownWhereTerms( return 0; /* restriction (3) */ } while( pWhere->op==TK_AND ){ - nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrc); + nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, pSrcList, iSrc); pWhere = pWhere->pLeft; } -#if 0 /* Legacy code. Checks now done by sqlite3ExprIsTableConstraint() */ +#if 0 /* These checks now done by sqlite3ExprIsSingleTableConstraint() */ + if( ExprHasProperty(pWhere, EP_OuterON|EP_InnerON) /* (9a) */ + && (pSrcList->a[0].fg.jointype & JT_LTORJ)!=0 /* Fast pre-test of (9c) */ + ){ + int jj; + for(jj=0; jjw.iJoin==pSrcList->a[jj].iCursor ){ + /* If we reach this point, both (9a) and (9b) are satisfied. 
+ ** The following loop checks (9c): + */ + for(jj++; jja[jj].fg.jointype & JT_RIGHT)!=0 ){ + return 0; /* restriction (9) */ + } + } + } + } + } if( isLeftJoin && (ExprHasProperty(pWhere,EP_OuterON)==0 || pWhere->w.iJoin!=iCursor) @@ -146043,7 +152640,19 @@ static int pushDownWhereTerms( } #endif - if( sqlite3ExprIsTableConstraint(pWhere, pSrc) ){ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( ViewCanHaveRowid && (pWhere->op==TK_ISNULL || pWhere->op==TK_NOTNULL) ){ + Expr *pLeft = pWhere->pLeft; + if( ALWAYS(pLeft) + && pLeft->op==TK_COLUMN + && pLeft->iColumn < 0 + ){ + return 0; /* Restriction (12) */ + } + } +#endif + + if( sqlite3ExprIsSingleTableConstraint(pWhere, pSrcList, iSrc) ){ nChng++; pSubq->selFlags |= SF_PushDown; while( pSubq ){ @@ -146055,6 +152664,7 @@ static int pushDownWhereTerms( x.iNewTable = pSrc->iCursor; x.isOuterJoin = 0; x.pEList = pSubq->pEList; + x.pCList = findLeftmostExprlist(pSubq); pNew = substExpr(&x, pNew); #ifndef SQLITE_OMIT_WINDOWFUNC if( pSubq->pWin && 0==pushDownWindowCheck(pParse, pSubq, pNew) ){ @@ -146076,6 +152686,78 @@ static int pushDownWhereTerms( } #endif /* !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW) */ +/* +** Check to see if a subquery contains result-set columns that are +** never used. If it does, change the value of those result-set columns +** to NULL so that they do not cause unnecessary work to compute. +** +** Return the number of column that were changed to NULL. +*/ +static int disableUnusedSubqueryResultColumns(SrcItem *pItem){ + int nCol; + Select *pSub; /* The subquery to be simplified */ + Select *pX; /* For looping over compound elements of pSub */ + Table *pTab; /* The table that describes the subquery */ + int j; /* Column number */ + int nChng = 0; /* Number of columns converted to NULL */ + Bitmask colUsed; /* Columns that may not be NULLed out */ + + assert( pItem!=0 ); + if( pItem->fg.isCorrelated || pItem->fg.isCte ){ + return 0; + } + assert( pItem->pTab!=0 ); + pTab = pItem->pTab; + assert( pItem->pSelect!=0 ); + pSub = pItem->pSelect; + assert( pSub->pEList->nExpr==pTab->nCol ); + for(pX=pSub; pX; pX=pX->pPrior){ + if( (pX->selFlags & (SF_Distinct|SF_Aggregate))!=0 ){ + testcase( pX->selFlags & SF_Distinct ); + testcase( pX->selFlags & SF_Aggregate ); + return 0; + } + if( pX->pPrior && pX->op!=TK_ALL ){ + /* This optimization does not work for compound subqueries that + ** use UNION, INTERSECT, or EXCEPT. Only UNION ALL is allowed. */ + return 0; + } +#ifndef SQLITE_OMIT_WINDOWFUNC + if( pX->pWin ){ + /* This optimization does not work for subqueries that use window + ** functions. */ + return 0; + } +#endif + } + colUsed = pItem->colUsed; + if( pSub->pOrderBy ){ + ExprList *pList = pSub->pOrderBy; + for(j=0; jnExpr; j++){ + u16 iCol = pList->a[j].u.x.iOrderByCol; + if( iCol>0 ){ + iCol--; + colUsed |= ((Bitmask)1)<<(iCol>=BMS ? BMS-1 : iCol); + } + } + } + nCol = pTab->nCol; + for(j=0; jpPrior) { + Expr *pY = pX->pEList->a[j].pExpr; + if( pY->op==TK_NULL ) continue; + pY->op = TK_NULL; + ExprClearProperty(pY, EP_Skip|EP_Unlikely); + pX->selFlags |= SF_PushDown; + nChng++; + } + } + return nChng; +} + + /* ** The pFunc is the only aggregate function in the query. Check to see ** if the query is a candidate for the min/max optimization. 
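The WHERE-clause push-down restrictions above and the new disableUnusedSubqueryResultColumns() pass are planner-level rewrites: they change the generated query plan, not the query results. Their effect is therefore easiest to observe via EXPLAIN QUERY PLAN. A minimal sketch for dumping a plan from C, assuming an open sqlite3* handle — the helper name show_plan() and the example table t2(b,c) are illustrative assumptions, not part of this diff:

#include <stdio.h>
#include <sqlite3.h>

/* Print the EXPLAIN QUERY PLAN rows for zSql. Column 3 of each output
** row is the human-readable plan detail. Illustrative sketch only. */
static void show_plan(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  char *zExplain = sqlite3_mprintf("EXPLAIN QUERY PLAN %s", zSql);
  if( zExplain && sqlite3_prepare_v2(db, zExplain, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, 3));
    }
  }
  sqlite3_finalize(pStmt);   /* Safe no-op if pStmt is NULL */
  sqlite3_free(zExplain);
}

/* Example: when the push-down applies, the outer filter x=10 is
** evaluated inside the scan of the hypothetical table t2 rather than
** against a materialized subquery result:
**
**   show_plan(db, "SELECT x FROM (SELECT b AS x, c AS y FROM t2) WHERE x=10;");
*/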
@@ -146354,8 +153036,7 @@ static struct Cte *searchWith( SQLITE_PRIVATE With *sqlite3WithPush(Parse *pParse, With *pWith, u8 bFree){ if( pWith ){ if( bFree ){ - pWith = (With*)sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3WithDelete, + pWith = (With*)sqlite3ParserAddCleanup(pParse, sqlite3WithDeleteGeneric, pWith); if( pWith==0 ) return 0; } @@ -146467,9 +153148,6 @@ static int resolveFromTermToCte( pFrom->fg.isCte = 1; pFrom->u2.pCteUse = pCteUse; pCteUse->nUse++; - if( pCteUse->nUse>=2 && pCteUse->eM10d==M10d_Any ){ - pCteUse->eM10d = M10d_Yes; - } /* Check if this is a recursive CTE. */ pRecTerm = pSel = pFrom->pSelect; @@ -146579,9 +153257,9 @@ SQLITE_PRIVATE void sqlite3SelectPopWith(Walker *pWalker, Select *p){ #endif /* -** The SrcList_item structure passed as the second argument represents a +** The SrcItem structure passed as the second argument represents a ** sub-query in the FROM clause of a SELECT statement. This function -** allocates and populates the SrcList_item.pTab object. If successful, +** allocates and populates the SrcItem.pTab object. If successful, ** SQLITE_OK is returned. Otherwise, if an OOM error is encountered, ** SQLITE_NOMEM. */ @@ -146601,12 +153279,14 @@ SQLITE_PRIVATE int sqlite3ExpandSubquery(Parse *pParse, SrcItem *pFrom){ while( pSel->pPrior ){ pSel = pSel->pPrior; } sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol); pTab->iPKey = -1; + pTab->eTabType = TABTYP_VIEW; pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) ); #ifndef SQLITE_ALLOW_ROWID_IN_VIEW /* The usual case - do not allow ROWID on a subquery */ pTab->tabFlags |= TF_Ephemeral | TF_NoVisibleRowid; #else - pTab->tabFlags |= TF_Ephemeral; /* Legacy compatibility mode */ + /* Legacy compatibility mode */ + pTab->tabFlags |= TF_Ephemeral | sqlite3Config.mNoVisibleRowid; #endif return pParse->nErr ? SQLITE_ERROR : SQLITE_OK; } @@ -146843,12 +153523,20 @@ static int selectExpander(Walker *pWalker, Select *p){ ** expanded. 
*/ int tableSeen = 0; /* Set to 1 when TABLE matches */ char *zTName = 0; /* text of name of TABLE */ + int iErrOfst; if( pE->op==TK_DOT ){ + assert( (selFlags & SF_NestedFrom)==0 ); assert( pE->pLeft!=0 ); assert( !ExprHasProperty(pE->pLeft, EP_IntValue) ); zTName = pE->pLeft->u.zToken; + assert( ExprUseWOfst(pE->pLeft) ); + iErrOfst = pE->pRight->w.iOfst; + }else{ + assert( ExprUseWOfst(pE) ); + iErrOfst = pE->w.iOfst; } for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ + int nAdd; /* Number of cols including rowid */ Table *pTab = pFrom->pTab; /* Table for this data source */ ExprList *pNestedFrom; /* Result-set of a nested FROM clause */ char *zTabName; /* AS name for this data source */ @@ -146866,6 +153554,7 @@ static int selectExpander(Walker *pWalker, Select *p){ pNestedFrom = pFrom->pSelect->pEList; assert( pNestedFrom!=0 ); assert( pNestedFrom->nExpr==pTab->nCol ); + assert( VisibleRowid(pTab)==0 || ViewCanHaveRowid ); }else{ if( zTName && sqlite3StrICmp(zTName, zTabName)!=0 ){ continue; } @@ -146883,6 +153572,7 @@ static int selectExpander(Walker *pWalker, Select *p){ for(ii=0; ii<pUsing->nId; ii++){ const char *zUName = pUsing->a[ii].zName; pRight = sqlite3Expr(db, TK_ID, zUName); + sqlite3ExprSetErrorOffset(pRight, iErrOfst); pNew = sqlite3ExprListAppend(pParse, pNew, pRight); if( pNew ){ struct ExprList_item *pX = &pNew->a[pNew->nExpr-1]; @@ -146895,33 +153585,49 @@ static int selectExpander(Walker *pWalker, Select *p){ }else{ pUsing = 0; } - for(j=0; j<pTab->nCol; j++){ - char *zName = pTab->aCol[j].zCnName; + + nAdd = pTab->nCol; + if( VisibleRowid(pTab) && (selFlags & SF_NestedFrom)!=0 ) nAdd++; + for(j=0; j<nAdd; j++){ + const char *zName = 0; struct ExprList_item *pX; /* Newly added ExprList term */ - assert( zName ); - if( zTName && pNestedFrom - && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0)==0 - ){ - continue; - } + if( j==pTab->nCol ){ + zName = sqlite3RowidAlias(pTab); + if( zName==0 ) continue; + }else{ + zName = pTab->aCol[j].zCnName; - /* If a column is marked as 'hidden', omit it from the expanded - ** result-set list unless the SELECT has the SF_IncludeHidden - ** bit set. - */ - if( (p->selFlags & SF_IncludeHidden)==0 - && IsHiddenColumn(&pTab->aCol[j]) - ){ - continue; - } - if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 - && zTName==0 - && (selFlags & (SF_NestedFrom))==0 - ){ - continue; + /* If pTab is actually an SF_NestedFrom sub-select, do not + ** expand any ENAME_ROWID columns. */ + if( pNestedFrom && pNestedFrom->a[j].fg.eEName==ENAME_ROWID ){ + continue; + } + + if( zTName + && pNestedFrom + && sqlite3MatchEName(&pNestedFrom->a[j], 0, zTName, 0, 0)==0 + ){ + continue; + } + + /* If a column is marked as 'hidden', omit it from the expanded + ** result-set list unless the SELECT has the SF_IncludeHidden + ** bit set.
+ */ + if( (p->selFlags & SF_IncludeHidden)==0 + && IsHiddenColumn(&pTab->aCol[j]) + ){ + continue; + } + if( (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 + && zTName==0 + && (selFlags & (SF_NestedFrom))==0 + ){ + continue; + } } + assert( zName ); tableSeen = 1; if( i>0 && zTName==0 && (selFlags & SF_NestedFrom)==0 ){ @@ -146955,6 +153661,7 @@ static int selectExpander(Walker *pWalker, Select *p){ }else{ pExpr = pRight; } + sqlite3ExprSetErrorOffset(pExpr, iErrOfst); pNew = sqlite3ExprListAppend(pParse, pNew, pExpr); if( pNew==0 ){ break; /* OOM */ @@ -146962,7 +153669,8 @@ static int selectExpander(Walker *pWalker, Select *p){ pX = &pNew->a[pNew->nExpr-1]; assert( pX->zEName==0 ); if( (selFlags & SF_NestedFrom)!=0 && !IN_RENAME_OBJECT ){ - if( pNestedFrom ){ + if( pNestedFrom && (!ViewCanHaveRowid || j<pNestedFrom->nExpr) ){ + assert( j<pNestedFrom->nExpr ); pX->zEName = sqlite3DbStrDup(db, pNestedFrom->a[j].zEName); testcase( pX->zEName==0 ); }else{ @@ -146970,11 +153678,11 @@ static int selectExpander(Walker *pWalker, Select *p){ zSchemaName, zTabName, zName); testcase( pX->zEName==0 ); } - pX->fg.eEName = ENAME_TAB; + pX->fg.eEName = (j==pTab->nCol ? ENAME_ROWID : ENAME_TAB); if( (pFrom->fg.isUsing && sqlite3IdListIndex(pFrom->u3.pUsing, zName)>=0) || (pUsing && sqlite3IdListIndex(pUsing, zName)>=0) - || (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)!=0 + || (j<pTab->nCol && (pTab->aCol[j].colFlags & COLFLAG_NOEXPAND)) ){ pX->fg.bNoExpand = 1; } @@ -147009,8 +153717,8 @@ static int selectExpander(Walker *pWalker, Select *p){ } } #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After result-set wildcard expansion:\n")); + if( sqlite3TreeTrace & 0x8 ){ + TREETRACE(0x8,pParse,p,("After result-set wildcard expansion:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -147061,14 +153769,14 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){ ** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo() ** interface. ** -** For each FROM-clause subquery, add Column.zType and Column.zColl -** information to the Table structure that represents the result set -** of that subquery. +** For each FROM-clause subquery, add Column.zType, Column.zColl, and +** Column.affinity information to the Table structure that represents +** the result set of that subquery. ** ** The Table structure that represents the result set was constructed -** by selectExpander() but the type and collation information was omitted -** at that point because identifiers had not yet been resolved. This -** routine is called after identifier resolution. +** by selectExpander() but the type and collation and affinity information +** was omitted at that point because identifiers had not yet been resolved. +** This routine is called after identifier resolution.
*/ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ Parse *pParse; @@ -147076,10 +153784,11 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ SrcList *pTabList; SrcItem *pFrom; - assert( p->selFlags & SF_Resolved ); if( p->selFlags & SF_HasTypeInfo ) return; p->selFlags |= SF_HasTypeInfo; pParse = pWalker->pParse; + testcase( (p->selFlags & SF_Resolved)==0 ); + assert( (p->selFlags & SF_Resolved) || IN_RENAME_OBJECT ); pTabList = p->pSrc; for(i=0, pFrom=pTabList->a; i<pTabList->nSrc; i++, pFrom++){ Table *pTab = pFrom->pTab; @@ -147088,9 +153797,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){ /* A sub-query in the FROM clause of a SELECT */ Select *pSel = pFrom->pSelect; if( pSel ){ - while( pSel->pPrior ) pSel = pSel->pPrior; - sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel, - SQLITE_AFF_NONE); + sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE); } } } @@ -147145,6 +153852,184 @@ SQLITE_PRIVATE void sqlite3SelectPrep( sqlite3SelectAddTypeInfo(pParse, p); } +#if TREETRACE_ENABLED +/* +** Display all information about an AggInfo object +*/ +static void printAggInfo(AggInfo *pAggInfo){ + int ii; + for(ii=0; ii<pAggInfo->nColumn; ii++){ + struct AggInfo_col *pCol = &pAggInfo->aCol[ii]; + sqlite3DebugPrintf( + "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d" + " iSorterColumn=%d %s\n", + ii, pCol->pTab ? pCol->pTab->zName : "NULL", + pCol->iTable, pCol->iColumn, pAggInfo->iFirstReg+ii, + pCol->iSorterColumn, + ii>=pAggInfo->nAccumulator ? "" : " Accumulator"); + sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0); + } + for(ii=0; ii<pAggInfo->nFunc; ii++){ + sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n", + ii, pAggInfo->iFirstReg+pAggInfo->nColumn+ii); + sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0); + } +} +#endif /* TREETRACE_ENABLED */ + +/* +** Analyze the arguments to aggregate functions. Create new pAggInfo->aCol[] +** entries for columns that are arguments to aggregate functions but which +** are not otherwise used. +** +** The aCol[] entries in AggInfo prior to nAccumulator are columns that +** are referenced outside of aggregate functions. These might be columns +** that are part of the GROUP by clause, for example. Other database engines +** would throw an error if there is a column reference that is not in the +** GROUP BY clause and that is not part of an aggregate function argument. +** But SQLite allows this. +** +** The aCol[] entries beginning with the aCol[nAccumulator] and following +** are column references that are used exclusively as arguments to +** aggregate functions. This routine is responsible for computing +** (or recomputing) those aCol[] entries.
+*/ +static void analyzeAggFuncArgs( + AggInfo *pAggInfo, + NameContext *pNC +){ + int i; + assert( pAggInfo!=0 ); + assert( pAggInfo->iFirstReg==0 ); + pNC->ncFlags |= NC_InAggFunc; + for(i=0; i<pAggInfo->nFunc; i++){ + Expr *pExpr = pAggInfo->aFunc[i].pFExpr; + assert( pExpr->op==TK_FUNCTION || pExpr->op==TK_AGG_FUNCTION ); + assert( ExprUseXList(pExpr) ); + sqlite3ExprAnalyzeAggList(pNC, pExpr->x.pList); + if( pExpr->pLeft ){ + assert( pExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pExpr->pLeft) ); + sqlite3ExprAnalyzeAggList(pNC, pExpr->pLeft->x.pList); + } +#ifndef SQLITE_OMIT_WINDOWFUNC + assert( !IsWindowFunc(pExpr) ); + if( ExprHasProperty(pExpr, EP_WinFunc) ){ + sqlite3ExprAnalyzeAggregates(pNC, pExpr->y.pWin->pFilter); + } +#endif + } + pNC->ncFlags &= ~NC_InAggFunc; +} + +/* +** An index on expressions is being used in the inner loop of an +** aggregate query with a GROUP BY clause. This routine attempts +** to adjust the AggInfo object to take advantage of index and to +** perhaps use the index as a covering index. +** +*/ +static void optimizeAggregateUseOfIndexedExpr( + Parse *pParse, /* Parsing context */ + Select *pSelect, /* The SELECT statement being processed */ + AggInfo *pAggInfo, /* The aggregate info */ + NameContext *pNC /* Name context used to resolve agg-func args */ +){ + assert( pAggInfo->iFirstReg==0 ); + assert( pSelect!=0 ); + assert( pSelect->pGroupBy!=0 ); + pAggInfo->nColumn = pAggInfo->nAccumulator; + if( ALWAYS(pAggInfo->nSortingColumn>0) ){ + int mx = pSelect->pGroupBy->nExpr - 1; + int j, k; + for(j=0; j<pAggInfo->nColumn; j++){ + k = pAggInfo->aCol[j].iSorterColumn; + if( k>mx ) mx = k; + } + pAggInfo->nSortingColumn = mx+1; + } + analyzeAggFuncArgs(pAggInfo, pNC); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + IndexedExpr *pIEpr; + TREETRACE(0x20, pParse, pSelect, + ("AggInfo (possibly) adjusted for Indexed Exprs\n")); + sqlite3TreeViewSelect(0, pSelect, 0); + for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){ + printf("data-cursor=%d index={%d,%d}\n", + pIEpr->iDataCur, pIEpr->iIdxCur, pIEpr->iIdxCol); + sqlite3TreeViewExpr(0, pIEpr->pExpr, 0); + } + printAggInfo(pAggInfo); + } +#else + UNUSED_PARAMETER(pSelect); + UNUSED_PARAMETER(pParse); +#endif +} + +/* +** Walker callback for aggregateConvertIndexedExprRefToColumn(). +*/ +static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){ + AggInfo *pAggInfo; + struct AggInfo_col *pCol; + UNUSED_PARAMETER(pWalker); + if( pExpr->pAggInfo==0 ) return WRC_Continue; + if( pExpr->op==TK_AGG_COLUMN ) return WRC_Continue; + if( pExpr->op==TK_AGG_FUNCTION ) return WRC_Continue; + if( pExpr->op==TK_IF_NULL_ROW ) return WRC_Continue; + pAggInfo = pExpr->pAggInfo; + if( NEVER(pExpr->iAgg>=pAggInfo->nColumn) ) return WRC_Continue; + assert( pExpr->iAgg>=0 ); + pCol = &pAggInfo->aCol[pExpr->iAgg]; + pExpr->op = TK_AGG_COLUMN; + pExpr->iTable = pCol->iTable; + pExpr->iColumn = pCol->iColumn; + ExprClearProperty(pExpr, EP_Skip|EP_Collate|EP_Unlikely); + return WRC_Prune; +} + +/* +** Convert every pAggInfo->aFunc[].pExpr such that any node within +** those expressions that has pAggInfo set is changed into a TK_AGG_COLUMN +** opcode.
+*/ +static void aggregateConvertIndexedExprRefToColumn(AggInfo *pAggInfo){ + int i; + Walker w; + memset(&w, 0, sizeof(w)); + w.xExprCallback = aggregateIdxEprRefToColCallback; + for(i=0; i<pAggInfo->nFunc; i++){ + sqlite3WalkExpr(&w, pAggInfo->aFunc[i].pFExpr); + } +} + + +/* +** Allocate a block of registers so that there is one register for each +** pAggInfo->aCol[] and pAggInfo->aFunc[] entry in pAggInfo. The first +** register in this block is stored in pAggInfo->iFirstReg. +** +** This routine may only be called once for each AggInfo object. Prior +** to calling this routine: +** +** * The aCol[] and aFunc[] arrays may be modified +** * The AggInfoColumnReg() and AggInfoFuncReg() macros may not be used +** +** After calling this routine: +** +** * The aCol[] and aFunc[] arrays are fixed +** * The AggInfoColumnReg() and AggInfoFuncReg() macros may be used +** +*/ +static void assignAggregateRegisters(Parse *pParse, AggInfo *pAggInfo){ + assert( pAggInfo!=0 ); + assert( pAggInfo->iFirstReg==0 ); + pAggInfo->iFirstReg = pParse->nMem + 1; + pParse->nMem += pAggInfo->nColumn + pAggInfo->nFunc; +} + /* ** Reset the aggregate accumulator. ** @@ -147158,24 +154043,13 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ int i; struct AggInfo_func *pFunc; int nReg = pAggInfo->nFunc + pAggInfo->nColumn; + assert( pAggInfo->iFirstReg>0 ); assert( pParse->db->pParse==pParse ); assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 ); if( nReg==0 ) return; if( pParse->nErr ) return; -#ifdef SQLITE_DEBUG - /* Verify that all AggInfo registers are within the range specified by - ** AggInfo.mnReg..AggInfo.mxReg */ - assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 ); - for(i=0; i<pAggInfo->nColumn; i++){ - assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg - && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg ); - } - for(i=0; i<pAggInfo->nFunc; i++){ - assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg - && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg ); - } -#endif - sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg); + sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->iFirstReg, + pAggInfo->iFirstReg+nReg-1); for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){ if( pFunc->iDistinct>=0 ){ Expr *pE = pFunc->pFExpr; @@ -147192,6 +154066,36 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){ pFunc->pFunc->zName)); } } + if( pFunc->iOBTab>=0 ){ + ExprList *pOBList; + KeyInfo *pKeyInfo; + int nExtra = 0; + assert( pFunc->pFExpr->pLeft!=0 ); + assert( pFunc->pFExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pFunc->pFExpr->pLeft) ); + assert( pFunc->pFunc!=0 ); + pOBList = pFunc->pFExpr->pLeft->x.pList; + if( !pFunc->bOBUnique ){ + nExtra++; /* One extra column for the OP_Sequence */ + } + if( pFunc->bOBPayload ){ + /* extra columns for the function arguments */ + assert( ExprUseXList(pFunc->pFExpr) ); + nExtra += pFunc->pFExpr->x.pList->nExpr; + } + if( pFunc->bUseSubtype ){ + nExtra += pFunc->pFExpr->x.pList->nExpr; + } + pKeyInfo = sqlite3KeyInfoFromExprList(pParse, pOBList, 0, nExtra); + if( !pFunc->bOBUnique && pParse->nErr==0 ){ + pKeyInfo->nKeyField++; + } + sqlite3VdbeAddOp4(v, OP_OpenEphemeral, + pFunc->iOBTab, pOBList->nExpr+nExtra, 0, + (char*)pKeyInfo, P4_KEYINFO); + ExplainQueryPlan((pParse, 0, "USE TEMP B-TREE FOR %s(ORDER BY)", + pFunc->pFunc->zName)); + } } } @@ -147207,20 +154111,71 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){ ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); pList = pF->pFExpr->x.pList; - sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ?
pList->nExpr : 0); + if( pF->iOBTab>=0 ){ + /* For an ORDER BY aggregate, calls to OP_AggStep were deferred. Inputs + ** were stored in ephemeral table pF->iOBTab. Here, we extract those + ** inputs (in ORDER BY order) and make all calls to OP_AggStep + ** before doing the OP_AggFinal call. */ + int iTop; /* Start of loop for extracting columns */ + int nArg; /* Number of columns to extract */ + int nKey; /* Key columns to be skipped */ + int regAgg; /* Extract into this array */ + int j; /* Loop counter */ + + assert( pF->pFunc!=0 ); + nArg = pList->nExpr; + regAgg = sqlite3GetTempRange(pParse, nArg); + + if( pF->bOBPayload==0 ){ + nKey = 0; + }else{ + assert( pF->pFExpr->pLeft!=0 ); + assert( ExprUseXList(pF->pFExpr->pLeft) ); + assert( pF->pFExpr->pLeft->x.pList!=0 ); + nKey = pF->pFExpr->pLeft->x.pList->nExpr; + if( ALWAYS(!pF->bOBUnique) ) nKey++; + } + iTop = sqlite3VdbeAddOp1(v, OP_Rewind, pF->iOBTab); VdbeCoverage(v); + for(j=nArg-1; j>=0; j--){ + sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, nKey+j, regAgg+j); + } + if( pF->bUseSubtype ){ + int regSubtype = sqlite3GetTempReg(pParse); + int iBaseCol = nKey + nArg + (pF->bOBPayload==0 && pF->bOBUnique==0); + for(j=nArg-1; j>=0; j--){ + sqlite3VdbeAddOp3(v, OP_Column, pF->iOBTab, iBaseCol+j, regSubtype); + sqlite3VdbeAddOp2(v, OP_SetSubtype, regSubtype, regAgg+j); + } + sqlite3ReleaseTempReg(pParse, regSubtype); + } + sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3VdbeAddOp2(v, OP_Next, pF->iOBTab, iTop+1); VdbeCoverage(v); + sqlite3VdbeJumpHere(v, iTop); + sqlite3ReleaseTempRange(pParse, regAgg, nArg); + } + sqlite3VdbeAddOp2(v, OP_AggFinal, AggInfoFuncReg(pAggInfo,i), + pList ? pList->nExpr : 0); sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); } } - /* -** Update the accumulator memory cells for an aggregate based on -** the current cursor position. +** Generate code that will update the accumulator memory cells for an +** aggregate based on the current cursor position. ** ** If regAcc is non-zero and there are no min() or max() aggregates ** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator ** registers if register regAcc contains 0. The caller will take care ** of setting and clearing regAcc. +** +** For an ORDER BY aggregate, the actual accumulator memory cell update +** is deferred until after all input rows have been received, so that they +** can be run in the requested order. In that case, instead of invoking +** OP_AggStep to update the accumulator, just add the arguments that would +** have been passed into OP_AggStep into the sorting ephemeral table +** (along with the appropriate sort key).
*/ static void updateAccumulator( Parse *pParse, @@ -147235,14 +154190,19 @@ static void updateAccumulator( struct AggInfo_func *pF; struct AggInfo_col *pC; + assert( pAggInfo->iFirstReg>0 ); + if( pParse->nErr ) return; pAggInfo->directMode = 1; for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){ int nArg; int addrNext = 0; int regAgg; + int regAggSz = 0; + int regDistinct = 0; ExprList *pList; assert( ExprUseXList(pF->pFExpr) ); assert( !IsWindowFunc(pF->pFExpr) ); + assert( pF->pFunc!=0 ); pList = pF->pFExpr->x.pList; if( ExprHasProperty(pF->pFExpr, EP_WinFunc) ){ Expr *pFilter = pF->pFExpr->y.pWin->pFilter; @@ -147266,9 +154226,55 @@ static void updateAccumulator( addrNext = sqlite3VdbeMakeLabel(pParse); sqlite3ExprIfFalse(pParse, pFilter, addrNext, SQLITE_JUMPIFNULL); } - if( pList ){ + if( pF->iOBTab>=0 ){ + /* Instead of invoking AggStep, we must push the arguments that would + ** have been passed to AggStep onto the sorting table. */ + int jj; /* Registers used so far in building the record */ + ExprList *pOBList; /* The ORDER BY clause */ + assert( pList!=0 ); + nArg = pList->nExpr; + assert( nArg>0 ); + assert( pF->pFExpr->pLeft!=0 ); + assert( pF->pFExpr->pLeft->op==TK_ORDER ); + assert( ExprUseXList(pF->pFExpr->pLeft) ); + pOBList = pF->pFExpr->pLeft->x.pList; + assert( pOBList!=0 ); + assert( pOBList->nExpr>0 ); + regAggSz = pOBList->nExpr; + if( !pF->bOBUnique ){ + regAggSz++; /* One register for OP_Sequence */ + } + if( pF->bOBPayload ){ + regAggSz += nArg; + } + if( pF->bUseSubtype ){ + regAggSz += nArg; + } + regAggSz++; /* One extra register to hold result of MakeRecord */ + regAgg = sqlite3GetTempRange(pParse, regAggSz); + regDistinct = regAgg; + sqlite3ExprCodeExprList(pParse, pOBList, regAgg, 0, SQLITE_ECEL_DUP); + jj = pOBList->nExpr; + if( !pF->bOBUnique ){ + sqlite3VdbeAddOp2(v, OP_Sequence, pF->iOBTab, regAgg+jj); + jj++; + } + if( pF->bOBPayload ){ + regDistinct = regAgg+jj; + sqlite3ExprCodeExprList(pParse, pList, regDistinct, 0, SQLITE_ECEL_DUP); + jj += nArg; + } + if( pF->bUseSubtype ){ + int kk; + int regBase = pF->bOBPayload ?
regDistinct : regAgg; + for(kk=0; kk<nArg; kk++, jj++){ + sqlite3VdbeAddOp2(v, OP_GetSubtype, regBase+kk, regAgg+jj); + } + } + }else if( pList ){ nArg = pList->nExpr; regAgg = sqlite3GetTempRange(pParse, nArg); + regDistinct = regAgg; sqlite3ExprCodeExprList(pParse, pList, regAgg, 0, SQLITE_ECEL_DUP); }else{ nArg = 0; @@ -147279,26 +154285,37 @@ static void updateAccumulator( addrNext = sqlite3VdbeMakeLabel(pParse); } pF->iDistinct = codeDistinct(pParse, eDistinctType, - pF->iDistinct, addrNext, pList, regAgg); - } - if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ - CollSeq *pColl = 0; - struct ExprList_item *pItem; - int j; - assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ - for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){ - pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr); - } - if( !pColl ){ - pColl = pParse->db->pDfltColl; + pF->iDistinct, addrNext, pList, regDistinct); + } + if( pF->iOBTab>=0 ){ + /* Insert a new record into the ORDER BY table */ + sqlite3VdbeAddOp3(v, OP_MakeRecord, regAgg, regAggSz-1, + regAgg+regAggSz-1); + sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pF->iOBTab, regAgg+regAggSz-1, + regAgg, regAggSz-1); + sqlite3ReleaseTempRange(pParse, regAgg, regAggSz); + }else{ + /* Invoke the AggStep function */ + if( pF->pFunc->funcFlags & SQLITE_FUNC_NEEDCOLL ){ + CollSeq *pColl = 0; + struct ExprList_item *pItem; + int j; + assert( pList!=0 ); /* pList!=0 if pF->pFunc has NEEDCOLL */ + for(j=0, pItem=pList->a; !pColl && j<nArg; j++, pItem++){ + pColl = sqlite3ExprCollSeq(pParse, pItem->pExpr); + } + if( !pColl ){ + pColl = pParse->db->pDfltColl; + } + if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; + sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, + (char *)pColl, P4_COLLSEQ); } - if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem; - sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ); + sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i)); + sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); + sqlite3VdbeChangeP5(v, (u8)nArg); + sqlite3ReleaseTempRange(pParse, regAgg, nArg); } - sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem); - sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF); - sqlite3VdbeChangeP5(v, (u8)nArg); - sqlite3ReleaseTempRange(pParse, regAgg, nArg); if( addrNext ){ sqlite3VdbeResolveLabel(v, addrNext); } @@ -147310,7 +154327,7 @@ static void updateAccumulator( addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v); } for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){ - sqlite3ExprCode(pParse, pC->pCExpr, pC->iMem); + sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i)); } pAggInfo->directMode = 0; @@ -147406,26 +154423,31 @@ static void havingToWhere(Parse *pParse, Select *p){ sqlite3WalkExpr(&sWalker, p->pHaving); #if TREETRACE_ENABLED if( sWalker.eCode && (sqlite3TreeTrace & 0x100)!=0 ){ - SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); + TREETRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif } /* -** Check to see if the pThis entry of pTabList is a self-join of a prior view. -** If it is, then return the SrcList_item for the prior view. If it is not, -** then return 0. +** Check to see if the pThis entry of pTabList is a self-join of another view. +** Search FROM-clause entries in the range of iFirst..iEnd, including iFirst +** but stopping before iEnd. +** +** If pThis is a self-join, then return the SrcItem for the first other +** instance of that view found. If pThis is not a self-join then return 0.
*/ static SrcItem *isSelfJoinView( SrcList *pTabList, /* Search for self-joins in this FROM clause */ - SrcItem *pThis /* Search for prior reference to this subquery */ + SrcItem *pThis, /* Search for prior reference to this subquery */ + int iFirst, int iEnd /* Range of FROM-clause entries to search. */ ){ SrcItem *pItem; assert( pThis->pSelect!=0 ); if( pThis->pSelect->selFlags & SF_PushDown ) return 0; - for(pItem = pTabList->a; pItem<pThis; pItem++){ + while( iFirst<iEnd ){ + pItem = &pTabList->a[iFirst++]; if( pItem->pSelect==0 ) continue; if( pItem->fg.viaCoroutine ) continue; if( pItem->zName==0 ) continue; @@ -147452,13 +154474,13 @@ static SrcItem *isSelfJoinView( /* ** Deallocate a single AggInfo object */ -static void agginfoFree(sqlite3 *db, AggInfo *p){ +static void agginfoFree(sqlite3 *db, void *pArg){ + AggInfo *p = (AggInfo*)pArg; sqlite3DbFree(db, p->aCol); sqlite3DbFree(db, p->aFunc); sqlite3DbFreeNN(db, p); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION /* ** Attempt to transform a query of the form ** @@ -147486,7 +154508,9 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ if( (p->selFlags & SF_Aggregate)==0 ) return 0; /* This is an aggregate */ if( p->pEList->nExpr!=1 ) return 0; /* Single result column */ if( p->pWhere ) return 0; + if( p->pHaving ) return 0; if( p->pGroupBy ) return 0; + if( p->pOrderBy ) return 0; pExpr = p->pEList->a[0].pExpr; if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */ assert( ExprUseUToken(pExpr) ); @@ -147494,15 +154518,18 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ assert( ExprUseXList(pExpr) ); if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */ if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */ + if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */ pSub = p->pSrc->a[0].pSelect; if( pSub==0 ) return 0; /* The FROM is a subquery */ - if( pSub->pPrior==0 ) return 0; /* Must be a compound ry */ + if( pSub->pPrior==0 ) return 0; /* Must be a compound */ + if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */ do{ if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */ if( pSub->pWhere ) return 0; /* No WHERE clause */ if( pSub->pLimit ) return 0; /* No LIMIT clause */ if( pSub->selFlags & SF_Aggregate ) return 0; /* Not an aggregate */ - pSub = pSub->pPrior; /* Repeat over compound */ + assert( pSub->pHaving==0 ); /* Due to the previous */ + pSub = pSub->pPrior; /* Repeat over compound */ }while( pSub ); /* If we reach this point then it is OK to perform the transformation */ @@ -147522,7 +154549,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ pSub->selFlags |= SF_Aggregate; pSub->selFlags &= ~SF_Compound; pSub->nSelectRow = 0; - sqlite3ExprListDelete(db, pSub->pEList); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, pSub->pEList); pTerm = pPrior ?
sqlite3ExprDup(db, pCount, 0) : pCount; pSub->pEList = sqlite3ExprListAppend(pParse, 0, pTerm); pTerm = sqlite3PExpr(pParse, TK_SELECT, 0, 0); @@ -147538,14 +154565,13 @@ static int countOfViewOptimization(Parse *pParse, Select *p){ p->selFlags &= ~SF_Aggregate; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n")); + if( sqlite3TreeTrace & 0x200 ){ + TREETRACE(0x200,pParse,p,("After count-of-view optimization:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif return 1; } -#endif /* SQLITE_COUNTOFVIEW_OPTIMIZATION */ /* ** If any term of pSrc, or any SF_NestedFrom sub-query, is not the same @@ -147570,6 +154596,68 @@ static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){ return 0; } +/* +** Return TRUE (non-zero) if the i-th entry in the pTabList SrcList can +** be implemented as a co-routine. The i-th entry is guaranteed to be +** a subquery. +** +** The subquery is implemented as a co-routine if all of the following are +** true: +** +** (1) The subquery will likely be implemented in the outer loop of +** the query. This will be the case if any one of the following +** conditions hold: +** (a) The subquery is the only term in the FROM clause +** (b) The subquery is the left-most term and a CROSS JOIN or similar +** requires it to be the outer loop +** (c) All of the following are true: +** (i) The subquery is the left-most subquery in the FROM clause +** (ii) There is nothing that would prevent the subquery from +** being used as the outer loop if the sqlite3WhereBegin() +** routine nominates it to that position. +** (iii) The query is not a UPDATE ... FROM +** (2) The subquery is not a CTE that should be materialized because +** (a) the AS MATERIALIZED keyword is used, or +** (b) the CTE is used multiple times and does not have the +** NOT MATERIALIZED keyword +** (3) The subquery is not part of a left operand for a RIGHT JOIN +** (4) The SQLITE_Coroutine optimization disable flag is not set +** (5) The subquery is not self-joined +*/ +static int fromClauseTermCanBeCoroutine( + Parse *pParse, /* Parsing context */ + SrcList *pTabList, /* FROM clause */ + int i, /* Which term of the FROM clause holds the subquery */ + int selFlags /* Flags on the SELECT statement */ +){ + SrcItem *pItem = &pTabList->a[i]; + if( pItem->fg.isCte ){ + const CteUse *pCteUse = pItem->u2.pCteUse; + if( pCteUse->eM10d==M10d_Yes ) return 0; /* (2a) */ + if( pCteUse->nUse>=2 && pCteUse->eM10d!=M10d_No ) return 0; /* (2b) */ + } + if( pTabList->a[0].fg.jointype & JT_LTORJ ) return 0; /* (3) */ + if( OptimizationDisabled(pParse->db, SQLITE_Coroutines) ) return 0; /* (4) */ + if( isSelfJoinView(pTabList, pItem, i+1, pTabList->nSrc)!=0 ){ + return 0; /* (5) */ + } + if( i==0 ){ + if( pTabList->nSrc==1 ) return 1; /* (1a) */ + if( pTabList->a[1].fg.jointype & JT_CROSS ) return 1; /* (1b) */ + if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */ + return 1; + } + if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */ + while( 1 /*exit-by-break*/ ){ + if( pItem->fg.jointype & (JT_OUTER|JT_CROSS) ) return 0; /* (1c-ii) */ + if( i==0 ) break; + i--; + pItem--; + if( pItem->pSelect!=0 ) return 0; /* (1c-i) */ + } + return 1; +} + /* ** Generate code for the SELECT statement given in the p argument. 
** @@ -147615,8 +154703,8 @@ SQLITE_PRIVATE int sqlite3Select( assert( db->mallocFailed==0 ); if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1; #if TREETRACE_ENABLED - SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain)); - if( sqlite3TreeTrace & 0x10100 ){ + TREETRACE(0x1,pParse,p, ("begin processing:\n", pParse->addrExplain)); + if( sqlite3TreeTrace & 0x10000 ){ if( (sqlite3TreeTrace & 0x10001)==0x10000 ){ sqlite3TreeViewLine(0, "In sqlite3Select() at %s:%d", __FILE__, __LINE__); @@ -147636,14 +154724,13 @@ SQLITE_PRIVATE int sqlite3Select( /* All of these destinations are also able to ignore the ORDER BY clause */ if( p->pOrderBy ){ #if TREETRACE_ENABLED - SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n")); - if( sqlite3TreeTrace & 0x100 ){ + TREETRACE(0x800,pParse,p, ("dropping superfluous ORDER BY:\n")); + if( sqlite3TreeTrace & 0x800 ){ sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY"); } #endif - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - p->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + p->pOrderBy); testcase( pParse->earlyCleanup ); p->pOrderBy = 0; } @@ -147657,8 +154744,8 @@ SQLITE_PRIVATE int sqlite3Select( assert( db->mallocFailed==0 ); assert( p->pEList!=0 ); #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x104 ){ - SELECTTRACE(0x104,pParse,p, ("after name resolution:\n")); + if( sqlite3TreeTrace & 0x10 ){ + TREETRACE(0x10,pParse,p, ("after name resolution:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -147699,8 +154786,8 @@ SQLITE_PRIVATE int sqlite3Select( goto select_end; } #if TREETRACE_ENABLED - if( p->pWin && (sqlite3TreeTrace & 0x108)!=0 ){ - SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n")); + if( p->pWin && (sqlite3TreeTrace & 0x40)!=0 ){ + TREETRACE(0x40,pParse,p, ("after window rewrite:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -147724,22 +154811,58 @@ SQLITE_PRIVATE int sqlite3Select( ** to a real table */ assert( pTab!=0 ); - /* Convert LEFT JOIN into JOIN if there are terms of the right table - ** of the LEFT JOIN used in the WHERE clause. + /* Try to simplify joins: + ** + ** LEFT JOIN -> JOIN + ** RIGHT JOIN -> JOIN + ** FULL JOIN -> RIGHT JOIN + ** + ** If terms of the i-th table are used in the WHERE clause in such a + ** way that the i-th table cannot be the NULL row of a join, then + ** perform the appropriate simplification. This is called + ** "OUTER JOIN strength reduction" in the SQLite documentation. 
*/ - if( (pItem->fg.jointype & (JT_LEFT|JT_RIGHT))==JT_LEFT - && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor) + if( (pItem->fg.jointype & (JT_LEFT|JT_LTORJ))!=0 + && sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor, + pItem->fg.jointype & JT_LTORJ) && OptimizationEnabled(db, SQLITE_SimplifyJoin) ){ - SELECTTRACE(0x100,pParse,p, - ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); - pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); - assert( pItem->iCursor>=0 ); - unsetJoinExpr(p->pWhere, pItem->iCursor, - pTabList->a[0].fg.jointype & JT_LTORJ); + if( pItem->fg.jointype & JT_LEFT ){ + if( pItem->fg.jointype & JT_RIGHT ){ + TREETRACE(0x1000,pParse,p, + ("FULL-JOIN simplifies to RIGHT-JOIN on term %d\n",i)); + pItem->fg.jointype &= ~JT_LEFT; + }else{ + TREETRACE(0x1000,pParse,p, + ("LEFT-JOIN simplifies to JOIN on term %d\n",i)); + pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER); + unsetJoinExpr(p->pWhere, pItem->iCursor, 0); + } + } + if( pItem->fg.jointype & JT_LTORJ ){ + for(j=i+1; j<pTabList->nSrc; j++){ + SrcItem *pI2 = &pTabList->a[j]; + if( pI2->fg.jointype & JT_RIGHT ){ + if( pI2->fg.jointype & JT_LEFT ){ + TREETRACE(0x1000,pParse,p, + ("FULL-JOIN simplifies to LEFT-JOIN on term %d\n",j)); + pI2->fg.jointype &= ~JT_RIGHT; + }else{ + TREETRACE(0x1000,pParse,p, + ("RIGHT-JOIN simplifies to JOIN on term %d\n",j)); + pI2->fg.jointype &= ~(JT_RIGHT|JT_OUTER); + unsetJoinExpr(p->pWhere, pI2->iCursor, 1); + } + } + } + for(j=pTabList->nSrc-1; j>=0; j--){ + pTabList->a[j].fg.jointype &= ~JT_LTORJ; + if( pTabList->a[j].fg.jointype & JT_RIGHT ) break; + } + } } - /* No futher action if this term of the FROM clause is no a subquery */ + /* No further action if this term of the FROM clause is not a subquery */ if( pSub==0 ) continue; /* Catch mismatch in the declared columns of a view and the number of @@ -147750,6 +154873,14 @@ SQLITE_PRIVATE int sqlite3Select( goto select_end; } + /* Do not attempt the usual optimizations (flattening and ORDER BY + ** elimination) on a MATERIALIZED common table expression because + ** a MATERIALIZED common table expression is an optimization fence. + */ + if( pItem->fg.isCte && pItem->u2.pCteUse->eM10d==M10d_Yes ){ + continue; + } + /* Do not try to flatten an aggregate subquery. ** ** Flattening an aggregate subquery is only possible if the outer query @@ -147779,6 +154910,8 @@ SQLITE_PRIVATE int sqlite3Select( ** (a) The outer query has a different ORDER BY clause ** (b) The subquery is part of a join ** See forum post 062d576715d277c8 + ** + ** Also retain the ORDER BY if the OmitOrderBy optimization is disabled.
*/ if( pSub->pOrderBy!=0 && (p->pOrderBy!=0 || pTabList->nSrc>1) /* Condition (5) */ @@ -147787,11 +154920,10 @@ SQLITE_PRIVATE int sqlite3Select( && (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */ && OptimizationEnabled(db, SQLITE_OmitOrderBy) ){ - SELECTTRACE(0x100,pParse,p, + TREETRACE(0x800,pParse,p, ("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1)); - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))sqlite3ExprListDelete, - pSub->pOrderBy); + sqlite3ParserAddCleanup(pParse, sqlite3ExprListDeleteGeneric, + pSub->pOrderBy); pSub->pOrderBy = 0; } @@ -147842,8 +154974,8 @@ SQLITE_PRIVATE int sqlite3Select( if( p->pPrior ){ rc = multiSelect(pParse, p, pDest); #if TREETRACE_ENABLED - SELECTTRACE(0x1,pParse,p,("end compound-select processing\n")); - if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + TREETRACE(0x400,pParse,p,("end compound-select processing\n")); + if( (sqlite3TreeTrace & 0x400)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -147863,24 +154995,21 @@ SQLITE_PRIVATE int sqlite3Select( && propagateConstants(pParse, p) ){ #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p,("After constant propagation:\n")); + if( sqlite3TreeTrace & 0x2000 ){ + TREETRACE(0x2000,pParse,p,("After constant propagation:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif }else{ - SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n")); + TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n")); } -#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION if( OptimizationEnabled(db, SQLITE_QueryFlattener|SQLITE_CountOfView) && countOfViewOptimization(pParse, p) ){ if( db->mallocFailed ) goto select_end; - pEList = p->pEList; pTabList = p->pSrc; } -#endif /* For each term in the FROM clause, do two things: ** (1) Authorized unreferenced tables @@ -147939,39 +155068,42 @@ SQLITE_PRIVATE int sqlite3Select( if( OptimizationEnabled(db, SQLITE_PushDown) && (pItem->fg.isCte==0 || (pItem->u2.pCteUse->eM10d!=M10d_Yes && pItem->u2.pCteUse->nUse<2)) - && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem) + && pushDownWhereTerms(pParse, pSub, p->pWhere, pTabList, i) ){ #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x100 ){ - SELECTTRACE(0x100,pParse,p, + if( sqlite3TreeTrace & 0x4000 ){ + TREETRACE(0x4000,pParse,p, ("After WHERE-clause push-down into subquery %d:\n", pSub->selId)); sqlite3TreeViewSelect(0, p, 0); } #endif assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 ); }else{ - SELECTTRACE(0x100,pParse,p,("Push-down not possible\n")); + TREETRACE(0x4000,pParse,p,("Push-down not possible\n")); + } + + /* Convert unused result columns of the subquery into simple NULL + ** expressions, to avoid unneeded searching and computation. 
+ */ + if( OptimizationEnabled(db, SQLITE_NullUnusedCols) + && disableUnusedSubqueryResultColumns(pItem) + ){ +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x4000 ){ + TREETRACE(0x4000,pParse,p, + ("Change unused result columns to NULL for subquery %d:\n", + pSub->selId)); + sqlite3TreeViewSelect(0, p, 0); + } +#endif } zSavedAuthContext = pParse->zAuthContext; pParse->zAuthContext = pItem->zName; /* Generate code to implement the subquery - ** - ** The subquery is implemented as a co-routine if all of the following are - ** true: - ** - ** (1) the subquery is guaranteed to be the outer loop (so that - ** it does not need to be computed more than once), and - ** (2) the subquery is not a CTE that should be materialized - ** (3) the subquery is not part of a left operand for a RIGHT JOIN */ - if( i==0 - && (pTabList->nSrc==1 - || (pTabList->a[1].fg.jointype&(JT_OUTER|JT_CROSS))!=0) /* (1) */ - && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */ - && (pTabList->a[0].fg.jointype & JT_LTORJ)==0 /* (3) */ - ){ + if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){ /* Implement a co-routine that will return a single row of the result ** set on each invocation. */ @@ -147993,7 +155125,7 @@ SQLITE_PRIVATE int sqlite3Select( }else if( pItem->fg.isCte && pItem->u2.pCteUse->addrM9e>0 ){ /* This is a CTE for which materialization code has already been ** generated. Invoke the subroutine to compute the materialization, - ** the make the pItem->iCursor be a copy of the ephemerial table that + ** then make the pItem->iCursor be a copy of the ephemeral table that ** holds the result of the materialization. */ CteUse *pCteUse = pItem->u2.pCteUse; sqlite3VdbeAddOp2(v, OP_Gosub, pCteUse->regRtn, pCteUse->addrM9e); @@ -148002,7 +155134,7 @@ SQLITE_PRIVATE int sqlite3Select( VdbeComment((v, "%!S", pItem)); } pSub->nSelectRow = pCteUse->nRowEst; - }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){ + }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){ /* This view has already been materialized by a prior entry in ** this same FROM clause. Reuse it. */ if( pPrior->addrFillSub ){ @@ -148016,6 +155148,9 @@ SQLITE_PRIVATE int sqlite3Select( ** the same view can reuse the materialization.
*/ int topAddr; int onceAddr = 0; +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExplain; +#endif pItem->regReturn = ++pParse->nMem; topAddr = sqlite3VdbeAddOp0(v, OP_Goto); @@ -148031,12 +155166,14 @@ SQLITE_PRIVATE int sqlite3Select( VdbeNoopComment((v, "materialize %!S", pItem)); } sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor); - ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem)); + + ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem)); sqlite3Select(pParse, pSub, &dest); pItem->pTab->nRowLogEst = pSub->nSelectRow; if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr); sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1); VdbeComment((v, "end %!S", pItem)); + sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1); sqlite3VdbeJumpHere(v, topAddr); sqlite3ClearTempRegCache(pParse); if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){ @@ -148062,8 +155199,8 @@ SQLITE_PRIVATE int sqlite3Select( sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n")); + if( sqlite3TreeTrace & 0x8000 ){ + TREETRACE(0x8000,pParse,p,("After all FROM-clause analysis:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -148099,8 +155236,8 @@ SQLITE_PRIVATE int sqlite3Select( sDistinct.isTnct = 2; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n")); + if( sqlite3TreeTrace & 0x20000 ){ + TREETRACE(0x20000,pParse,p,("Transform DISTINCT into GROUP BY:\n")); sqlite3TreeViewSelect(0, p, 0); } #endif @@ -148152,7 +155289,7 @@ SQLITE_PRIVATE int sqlite3Select( if( (p->selFlags & SF_FixedLimit)==0 ){ p->nSelectRow = 320; /* 4 billion rows */ } - computeLimitRegisters(pParse, p, iEnd); + if( p->pLimit ) computeLimitRegisters(pParse, p, iEnd); if( p->iLimit==0 && sSort.addrSortIndex>=0 ){ sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen); sSort.sortFlags |= SORTFLAG_UseSorter; @@ -148186,7 +155323,7 @@ SQLITE_PRIVATE int sqlite3Select( /* Begin the database scan. */ - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy, p->pEList, p, wctrlFlags, p->nSelectRow); if( pWInfo==0 ) goto select_end; @@ -148203,7 +155340,7 @@ SQLITE_PRIVATE int sqlite3Select( sSort.pOrderBy = 0; } } - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); /* If sorting index that was created by a prior OP_OpenEphemeral ** instruction ended up not being needed, then change the OP_OpenEphemeral @@ -148242,7 +155379,7 @@ SQLITE_PRIVATE int sqlite3Select( /* End the database scan loop. */ - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); } }else{ @@ -148315,20 +155452,21 @@ SQLITE_PRIVATE int sqlite3Select( */ pAggInfo = sqlite3DbMallocZero(db, sizeof(*pAggInfo) ); if( pAggInfo ){ - sqlite3ParserAddCleanup(pParse, - (void(*)(sqlite3*,void*))agginfoFree, pAggInfo); + sqlite3ParserAddCleanup(pParse, agginfoFree, pAggInfo); testcase( pParse->earlyCleanup ); } if( db->mallocFailed ){ goto select_end; } pAggInfo->selId = p->selId; +#ifdef SQLITE_DEBUG + pAggInfo->pSelect = p; +#endif memset(&sNC, 0, sizeof(sNC)); sNC.pParse = pParse; sNC.pSrcList = pTabList; sNC.uNC.pAggInfo = pAggInfo; VVA_ONLY( sNC.ncFlags = NC_UAggInfo; ) - pAggInfo->mnReg = pParse->nMem+1; pAggInfo->nSortingColumn = pGroupBy ? 
pGroupBy->nExpr : 0; pAggInfo->pGroupBy = pGroupBy; sqlite3ExprAnalyzeAggList(&sNC, pEList); @@ -148349,40 +155487,17 @@ SQLITE_PRIVATE int sqlite3Select( }else{ minMaxFlag = WHERE_ORDERBY_NORMAL; } - for(i=0; i<pAggInfo->nFunc; i++){ - Expr *pExpr = pAggInfo->aFunc[i].pFExpr; - assert( ExprUseXList(pExpr) ); - sNC.ncFlags |= NC_InAggFunc; - sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList); -#ifndef SQLITE_OMIT_WINDOWFUNC - assert( !IsWindowFunc(pExpr) ); - if( ExprHasProperty(pExpr, EP_WinFunc) ){ - sqlite3ExprAnalyzeAggregates(&sNC, pExpr->y.pWin->pFilter); - } -#endif - sNC.ncFlags &= ~NC_InAggFunc; - } - pAggInfo->mxReg = pParse->nMem; + analyzeAggFuncArgs(pAggInfo, &sNC); if( db->mallocFailed ) goto select_end; #if TREETRACE_ENABLED - if( sqlite3TreeTrace & 0x400 ){ - int ii; - SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20,pParse,p,("After aggregate analysis %p:\n", pAggInfo)); sqlite3TreeViewSelect(0, p, 0); if( minMaxFlag ){ sqlite3DebugPrintf("MIN/MAX Optimization (0x%02x) adds:\n", minMaxFlag); sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY"); } - for(ii=0; ii<pAggInfo->nColumn; ii++){ - sqlite3DebugPrintf("agg-column[%d] iMem=%d\n", - ii, pAggInfo->aCol[ii].iMem); - sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0); - } - for(ii=0; ii<pAggInfo->nFunc; ii++){ - sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n", - ii, pAggInfo->aFunc[ii].iMem); - sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0); - } + printAggInfo(pAggInfo); } #endif @@ -148392,7 +155507,7 @@ SQLITE_PRIVATE int sqlite3Select( */ if( pGroupBy ){ KeyInfo *pKeyInfo; /* Keying information for the group by clause */ - int addr1; /* A-vs-B comparision jump */ + int addr1; /* A-vs-B comparison jump */ int addrOutputRow; /* Start of subroutine that outputs a result row */ int regOutputRow; /* Return address register for output subroutine */ int addrSetAbort; /* Set the abort flag and return */ @@ -148451,17 +155566,21 @@ SQLITE_PRIVATE int sqlite3Select( ** in the right order to begin with. */ sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset); - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct, - 0, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY) + p, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY) | (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0 ); if( pWInfo==0 ){ sqlite3ExprListDelete(db, pDistinct); goto select_end; } + if( pParse->pIdxEpr ){ + optimizeAggregateUseOfIndexedExpr(pParse, p, pAggInfo, &sNC); + } + assignAggregateRegisters(pParse, pAggInfo); eDist = sqlite3WhereIsDistinct(pWInfo); - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){ /* The optimizer is able to deliver rows in group by order so ** we do not have to sort. The OP_OpenEphemeral table will be @@ -148479,9 +155598,13 @@ SQLITE_PRIVATE int sqlite3Select( int nCol; int nGroupBy; - explainTempTable(pParse, +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExp; /* Address of OP_Explain instruction */ +#endif + ExplainQueryPlan2(addrExp, (pParse, 0, "USE TEMP B-TREE FOR %s", (sDistinct.isTnct && (p->selFlags&SF_Distinct)==0) ?
- "DISTINCT" : "GROUP BY"); + "DISTINCT" : "GROUP BY" + )); groupBySort = 1; nGroupBy = pGroupBy->nExpr; @@ -148496,28 +155619,50 @@ SQLITE_PRIVATE int sqlite3Select( regBase = sqlite3GetTempRange(pParse, nCol); sqlite3ExprCodeExprList(pParse, pGroupBy, regBase, 0, 0); j = nGroupBy; + pAggInfo->directMode = 1; for(i=0; i<pAggInfo->nColumn; i++){ struct AggInfo_col *pCol = &pAggInfo->aCol[i]; if( pCol->iSorterColumn>=j ){ - int r1 = j + regBase; - sqlite3ExprCodeGetColumnOfTable(v, - pCol->pTab, pCol->iTable, pCol->iColumn, r1); + sqlite3ExprCode(pParse, pCol->pCExpr, j + regBase); j++; } } + pAggInfo->directMode = 0; regRecord = sqlite3GetTempReg(pParse); + sqlite3VdbeScanStatusCounters(v, addrExp, 0, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regRecord); sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord); + sqlite3VdbeScanStatusRange(v, addrExp, sqlite3VdbeCurrentAddr(v)-2, -1); sqlite3ReleaseTempReg(pParse, regRecord); sqlite3ReleaseTempRange(pParse, regBase, nCol); - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++; sortOut = sqlite3GetTempReg(pParse); + sqlite3VdbeScanStatusCounters(v, addrExp, sqlite3VdbeCurrentAddr(v), 0); sqlite3VdbeAddOp3(v, OP_OpenPseudo, sortPTab, sortOut, nCol); sqlite3VdbeAddOp2(v, OP_SorterSort, pAggInfo->sortingIdx, addrEnd); VdbeComment((v, "GROUP BY sort")); VdbeCoverage(v); pAggInfo->useSortingIdx = 1; + sqlite3VdbeScanStatusRange(v, addrExp, -1, sortPTab); + sqlite3VdbeScanStatusRange(v, addrExp, -1, pAggInfo->sortingIdx); + } + + /* If there are entries in pAggInfo->aFunc[] that contain subexpressions + ** that are indexed (and that were previously identified and tagged + ** in optimizeAggregateUseOfIndexedExpr()) then those subexpressions + ** must now be converted into a TK_AGG_COLUMN node so that the value + ** is correctly pulled from the index rather than being recomputed. */ + if( pParse->pIdxEpr ){ + aggregateConvertIndexedExprRefToColumn(pAggInfo); +#if TREETRACE_ENABLED + if( sqlite3TreeTrace & 0x20 ){ + TREETRACE(0x20, pParse, p, + ("AggInfo function expressions converted to reference index\n")); + sqlite3TreeViewSelect(0, p, 0); + printAggInfo(pAggInfo); + } +#endif } /* If the index or temporary table used by the GROUP BY sort @@ -148588,7 +155733,7 @@ SQLITE_PRIVATE int sqlite3Select( sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx,addrTopOfLoop); VdbeCoverage(v); }else{ - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); sqlite3VdbeChangeToNoop(v, addrSortingIdx); } @@ -148698,7 +155843,8 @@ SQLITE_PRIVATE int sqlite3Select( if( pKeyInfo ){ sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO); } - sqlite3VdbeAddOp2(v, OP_Count, iCsr, pAggInfo->aFunc[0].iMem); + assignAggregateRegisters(pParse, pAggInfo); + sqlite3VdbeAddOp2(v, OP_Count, iCsr, AggInfoFuncReg(pAggInfo,0)); sqlite3VdbeAddOp1(v, OP_Close, iCsr); explainSimpleCount(pParse, pTab, pBest); }else{ @@ -148734,6 +155880,7 @@ SQLITE_PRIVATE int sqlite3Select( pDistinct = pAggInfo->aFunc[0].pFExpr->x.pList; distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0; } + assignAggregateRegisters(pParse, pAggInfo); /* This case runs if the aggregate has no GROUP BY clause.
The ** processing is much simpler since there is only a single row @@ -148750,13 +155897,13 @@ SQLITE_PRIVATE int sqlite3Select( assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 ); assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 ); - SELECTTRACE(1,pParse,p,("WhereBegin\n")); + TREETRACE(0x2,pParse,p,("WhereBegin\n")); pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy, - pDistinct, 0, minMaxFlag|distFlag, 0); + pDistinct, p, minMaxFlag|distFlag, 0); if( pWInfo==0 ){ goto select_end; } - SELECTTRACE(1,pParse,p,("WhereBegin returns\n")); + TREETRACE(0x2,pParse,p,("WhereBegin returns\n")); eDist = sqlite3WhereIsDistinct(pWInfo); updateAccumulator(pParse, regAcc, pAggInfo, eDist); if( eDist!=WHERE_DISTINCT_NOOP ){ @@ -148770,7 +155917,7 @@ SQLITE_PRIVATE int sqlite3Select( if( minMaxFlag ){ sqlite3WhereMinMaxOptEarlyOut(v, pWInfo); } - SELECTTRACE(1,pParse,p,("WhereEnd\n")); + TREETRACE(0x2,pParse,p,("WhereEnd\n")); sqlite3WhereEnd(pWInfo); finalizeAggFunctions(pParse, pAggInfo); } @@ -148792,8 +155939,6 @@ SQLITE_PRIVATE int sqlite3Select( ** and send them to the callback one by one. */ if( sSort.pOrderBy ){ - explainTempTable(pParse, - sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY"); assert( p->pEList==pEList ); generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest); } @@ -148817,7 +155962,7 @@ SQLITE_PRIVATE int sqlite3Select( if( pAggInfo && !db->mallocFailed ){ for(i=0; i<pAggInfo->nColumn; i++){ Expr *pExpr = pAggInfo->aCol[i].pCExpr; - assert( pExpr!=0 ); + if( pExpr==0 ) continue; assert( pExpr->pAggInfo==pAggInfo ); assert( pExpr->iAgg==i ); } @@ -148831,8 +155976,8 @@ SQLITE_PRIVATE int sqlite3Select( #endif #if TREETRACE_ENABLED - SELECTTRACE(0x1,pParse,p,("end processing\n")); - if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ + TREETRACE(0x1,pParse,p,("end processing\n")); + if( (sqlite3TreeTrace & 0x40000)!=0 && ExplainQueryPlanParent(pParse)==0 ){ sqlite3TreeViewSelect(0, p, 0); } #endif @@ -149106,7 +156251,7 @@ SQLITE_PRIVATE Trigger *sqlite3TriggerList(Parse *pParse, Table *pTab){ if( pTrig->pTabSchema==pTab->pSchema && pTrig->table && 0==sqlite3StrICmp(pTrig->table, pTab->zName) - && pTrig->pTabSchema!=pTmpSchema + && (pTrig->pTabSchema!=pTmpSchema || pTrig->bReturning) ){ pTrig->pNext = pList; pList = pTrig; @@ -149228,6 +156373,10 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( sqlite3ErrorMsg(pParse, "cannot create triggers on virtual tables"); goto trigger_orphan_error; } + if( (pTab->tabFlags & TF_Shadow)!=0 && sqlite3ReadOnlyShadowTables(db) ){ + sqlite3ErrorMsg(pParse, "cannot create triggers on shadow tables"); + goto trigger_orphan_error; + } /* Check that the trigger name is not reserved and that no trigger of the ** specified name exists */ @@ -149247,6 +156396,7 @@ SQLITE_PRIVATE void sqlite3BeginTrigger( }else{ assert( !db->init.busy ); sqlite3CodeVerifySchema(pParse, iDb); + VVA_ONLY( pParse->ifNotExists = 1; ) } goto trigger_cleanup; } @@ -150010,10 +157160,17 @@ static void codeReturningTrigger( SrcList sFrom; assert( v!=0 ); - assert( pParse->bReturning ); + if( !pParse->bReturning ){ + /* This RETURNING trigger must be for a different statement as + ** this statement lacks a RETURNING clause.
*/ + return; + } assert( db->pParse==pParse ); pReturning = pParse->u1.pReturning; - assert( pTrigger == &(pReturning->retTrig) ); + if( pTrigger != &(pReturning->retTrig) ){ + /* This RETURNING trigger is for a different statement */ + return; + } memset(&sSelect, 0, sizeof(sSelect)); memset(&sFrom, 0, sizeof(sFrom)); sSelect.pEList = sqlite3ExprListDup(db, pReturning->pReturnEL, 0); @@ -150028,7 +157185,7 @@ static void codeReturningTrigger( } sqlite3ExprListDelete(db, sSelect.pEList); pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab); - if( !db->mallocFailed ){ + if( pParse->nErr==0 ){ NameContext sNC; memset(&sNC, 0, sizeof(sNC)); if( pReturning->nRetCol==0 ){ @@ -150236,7 +157393,7 @@ static TriggerPrg *codeRowTrigger( sSubParse.zAuthContext = pTrigger->zName; sSubParse.eTriggerOp = pTrigger->op; sSubParse.nQueryLoop = pParse->nQueryLoop; - sSubParse.disableVtab = pParse->disableVtab; + sSubParse.prepFlags = pParse->prepFlags; v = sqlite3GetVdbe(&sSubParse); if( v ){ @@ -150497,6 +157654,9 @@ SQLITE_PRIVATE u32 sqlite3TriggerColmask( Trigger *p; assert( isNew==1 || isNew==0 ); + if( IsView(pTab) ){ + return 0xffffffff; + } for(p=pTrigger; p; p=p->pNext){ if( p->op==op && (tr_tm&p->tr_tm) @@ -150582,11 +157742,14 @@ static void updateVirtualTable( ** it has been converted into REAL. */ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ + Column *pCol; assert( pTab!=0 ); - if( !IsView(pTab) ){ + assert( pTab->nCol>i ); + pCol = &pTab->aCol[i]; + if( pCol->iDflt ){ sqlite3_value *pValue = 0; u8 enc = ENC(sqlite3VdbeDb(v)); - Column *pCol = &pTab->aCol[i]; + assert( !IsView(pTab) ); VdbeComment((v, "%s.%s", pTab->zName, pCol->zCnName)); assert( i<pTab->nCol ); sqlite3ValueFromExpr(sqlite3VdbeDb(v), @@ -150597,7 +157760,7 @@ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){ } } #ifndef SQLITE_OMIT_FLOATING_POINT - if( pTab->aCol[i].affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){ + if( pCol->affinity==SQLITE_AFF_REAL && !IsVirtual(pTab) ){ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg); } #endif @@ -150744,7 +157907,7 @@ static void updateFromSelect( assert( pTabList->nSrc>1 ); if( pSrc ){ - pSrc->a[0].fg.notCte = 1; + assert( pSrc->a[0].fg.notCte ); pSrc->a[0].iCursor = -1; pSrc->a[0].pTab->nTabRef--; pSrc->a[0].pTab = 0; @@ -150783,7 +157946,8 @@ static void updateFromSelect( } } pSelect = sqlite3SelectNew(pParse, pList, - pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UFSrcCheck|SF_IncludeHidden, pLimit2 + pSrc, pWhere2, pGrp, 0, pOrderBy2, + SF_UFSrcCheck|SF_IncludeHidden|SF_UpdateFrom, pLimit2 ); if( pSelect ) pSelect->selFlags |= SF_OrderByReqd; sqlite3SelectDestInit(&dest, eDest, iEph); @@ -150927,7 +158091,7 @@ SQLITE_PRIVATE void sqlite3Update( if( sqlite3ViewGetColumnNames(pParse, pTab) ){ goto update_cleanup; } - if( sqlite3IsReadOnly(pParse, pTab, tmask) ){ + if( sqlite3IsReadOnly(pParse, pTab, pTrigger) ){ goto update_cleanup; } @@ -151246,12 +158410,22 @@ SQLITE_PRIVATE void sqlite3Update( /* Begin the database scan. ** ** Do not consider a single-pass strategy for a multi-row update if - ** there are any triggers or foreign keys to process, or rows may - ** be deleted as a result of REPLACE conflict handling. Any of these - ** things might disturb a cursor being used to scan through the table - ** or index, causing a single-pass approach to malfunction.
*/ + ** there is anything that might disrupt the cursor being used to do + ** the UPDATE: + ** (1) This is a nested UPDATE + ** (2) There are triggers + ** (3) There are FOREIGN KEY constraints + ** (4) There are REPLACE conflict handlers + ** (5) There are subqueries in the WHERE clause + */ flags = WHERE_ONEPASS_DESIRED; - if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){ + if( !pParse->nested + && !pTrigger + && !hasFK + && !chngKey + && !bReplace + && (pWhere==0 || !ExprHasProperty(pWhere, EP_Subquery)) + ){ flags |= WHERE_ONEPASS_MULTIROW; } pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0,0,0,flags,iIdxCur); @@ -151322,6 +158496,8 @@ SQLITE_PRIVATE void sqlite3Update( if( !isView ){ int addrOnce = 0; + int iNotUsed1 = 0; + int iNotUsed2 = 0; /* Open every index that needs updating. */ if( eOnePass!=ONEPASS_OFF ){ @@ -151333,7 +158509,7 @@ SQLITE_PRIVATE void sqlite3Update( addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); } sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur, - aToOpen, 0, 0); + aToOpen, &iNotUsed1, &iNotUsed2); if( addrOnce ){ sqlite3VdbeJumpHereOrPopInst(v, addrOnce); } @@ -151428,6 +158604,9 @@ SQLITE_PRIVATE void sqlite3Update( } } if( chngRowid==0 && pPk==0 ){ +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( isView ) sqlite3VdbeAddOp2(v, OP_Null, 0, regOldRowid); +#endif sqlite3VdbeAddOp2(v, OP_Copy, regOldRowid, regNewRowid); } } @@ -151624,8 +158803,10 @@ SQLITE_PRIVATE void sqlite3Update( sqlite3VdbeAddOp2(v, OP_AddImm, regRowCount, 1); } - sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, - TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); + if( pTrigger ){ + sqlite3CodeRowTrigger(pParse, pTrigger, TK_UPDATE, pChanges, + TRIGGER_AFTER, pTab, regOldRowid, onError, labelContinue); + } /* Repeat the above with the next record to be updated, until ** all record selected by the WHERE clause have been updated. @@ -151720,7 +158901,7 @@ static void updateVirtualTable( int nArg = 2 + pTab->nCol; /* Number of arguments to VUpdate */ int regArg; /* First register in VUpdate arg array */ int regRec; /* Register in which to assemble record */ - int regRowid; /* Register for ephem table rowid */ + int regRowid; /* Register for ephemeral table rowid */ int iCsr = pSrc->a[0].iCursor; /* Cursor used for virtual table scan */ int aDummy[2]; /* Unused arg for sqlite3WhereOkOnePass() */ int eOnePass; /* True to use onepass strategy */ @@ -151764,7 +158945,9 @@ static void updateVirtualTable( sqlite3ExprDup(db, pChanges->a[aXRef[i]].pExpr, 0) ); }else{ - pList = sqlite3ExprListAppend(pParse, pList, exprRowColumn(pParse, i)); + Expr *pRowExpr = exprRowColumn(pParse, i); + if( pRowExpr ) pRowExpr->op2 = OPFLAG_NOCHNG; + pList = sqlite3ExprListAppend(pParse, pList, pRowExpr); } } @@ -151841,7 +159024,7 @@ static void updateVirtualTable( sqlite3WhereEnd(pWInfo); } - /* Begin scannning through the ephemeral table. */ + /* Begin scanning through the ephemeral table. 
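**
** Each row read back out of the ephemeral table becomes one call to the
** virtual table's xUpdate method. A minimal sketch of how a module
** decodes the arguments (the function names are illustrative only):
**
**     static int myUpdate(sqlite3_vtab *pVtab, int argc,
**                         sqlite3_value **argv, sqlite3_int64 *pRowid){
**       if( argc==1 ) return myDoDelete(pVtab, argv[0]);   /* DELETE */
**       if( sqlite3_value_type(argv[0])==SQLITE_NULL ){
**         return myDoInsert(pVtab, argc, argv, pRowid);    /* INSERT */
**       }
**       return myDoUpdate(pVtab, argc, argv);  /* argv[0] = old rowid */
**     }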
*/ addr = sqlite3VdbeAddOp1(v, OP_Rewind, ephemTab); VdbeCoverage(v); /* Extract arguments from the current row of the ephemeral table and @@ -151961,7 +159144,8 @@ SQLITE_PRIVATE Upsert *sqlite3UpsertNew( SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( Parse *pParse, /* The parsing context */ SrcList *pTabList, /* Table into which we are inserting */ - Upsert *pUpsert /* The ON CONFLICT clauses */ + Upsert *pUpsert, /* The ON CONFLICT clauses */ + Upsert *pAll /* Complete list of all ON CONFLICT clauses */ ){ Table *pTab; /* That table into which we are inserting */ int rc; /* Result code */ @@ -152037,6 +159221,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( if( pIdx->aiColumn[ii]==XN_EXPR ){ assert( pIdx->aColExpr!=0 ); assert( pIdx->aColExpr->nExpr>ii ); + assert( pIdx->bHasExpr ); pExpr = pIdx->aColExpr->a[ii].pExpr; if( pExpr->op!=TK_COLLATE ){ sCol[0].pLeft = pExpr; @@ -152048,7 +159233,7 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( pExpr = &sCol[0]; } for(jj=0; jja[jj].pExpr,pExpr,iCursor)<2 ){ + if( sqlite3ExprCompare(0,pTarget->a[jj].pExpr,pExpr,iCursor)<2 ){ break; /* Column ii of the index matches column jj of target */ } } @@ -152063,6 +159248,14 @@ SQLITE_PRIVATE int sqlite3UpsertAnalyzeTarget( continue; } pUpsert->pUpsertIdx = pIdx; + if( sqlite3UpsertOfIndex(pAll,pIdx)!=pUpsert ){ + /* Really this should be an error. The isDup ON CONFLICT clause will + ** never fire. But this problem was not discovered until three years + ** after multi-CONFLICT upsert was added, and so we silently ignore + ** the problem to prevent breaking applications that might actually + ** have redundant ON CONFLICT clauses. */ + pUpsert->isDup = 1; + } break; } if( pUpsert->pUpsertIdx==0 ){ @@ -152089,9 +159282,13 @@ SQLITE_PRIVATE int sqlite3UpsertNextIsIPK(Upsert *pUpsert){ Upsert *pNext; if( NEVER(pUpsert==0) ) return 0; pNext = pUpsert->pNextUpsert; - if( pNext==0 ) return 1; - if( pNext->pUpsertTarget==0 ) return 1; - if( pNext->pUpsertIdx==0 ) return 1; + while( 1 /*exit-by-return*/ ){ + if( pNext==0 ) return 1; + if( pNext->pUpsertTarget==0 ) return 1; + if( pNext->pUpsertIdx==0 ) return 1; + if( !pNext->isDup ) return 0; + pNext = pNext->pNextUpsert; + } return 0; } @@ -152350,6 +159547,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( int nDb; /* Number of attached databases */ const char *zDbMain; /* Schema name of database to vacuum */ const char *zOut; /* Name of output file */ + u32 pgflags = PAGER_SYNCHRONOUS_OFF; /* sync flags for output db */ if( !db->autoCommit ){ sqlite3SetString(pzErrMsg, db, "cannot VACUUM from within a transaction"); @@ -152396,7 +159594,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( ** (possibly synchronous) transaction opened on the main database before ** sqlite3BtreeCopyFile() is called. ** - ** An optimisation would be to use a non-journaled pager. + ** An optimization would be to use a non-journaled pager. ** (Later:) I tried setting "PRAGMA vacuum_db.journal_mode=OFF" but ** that actually made the VACUUM run slower. Very little journalling ** actually occurs when doing a vacuum since the vacuum_db is initially @@ -152421,6 +159619,11 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( goto end_of_vacuum; } db->mDbFlags |= DBFLAG_VacuumInto; + + /* For a VACUUM INTO, the pager-flags are set to the same values as + ** they are for the database being vacuumed, except that PAGER_CACHESPILL + ** is always set. 
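**
** For reference, the statement affected here is VACUUM INTO, e.g. (a
** sketch; the destination filename is arbitrary):
**
**     int rc = sqlite3_exec(db, "VACUUM INTO '/tmp/backup.db'", 0, 0, 0);
**
** With this change the output database inherits the source database's
** synchronous setting rather than always being written with
** synchronous=OFF, so a power loss during the copy is less likely to
** leave a corrupt output file.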
*/ + pgflags = db->aDb[iDb].safety_level | (db->flags & PAGER_FLAGS_MASK); } nRes = sqlite3BtreeGetRequestedReserve(pMain); @@ -152439,7 +159642,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE int sqlite3RunVacuum( sqlite3BtreeSetCacheSize(pTemp, db->aDb[iDb].pSchema->cache_size); sqlite3BtreeSetSpillSize(pTemp, sqlite3BtreeSetSpillSize(pMain,0)); - sqlite3BtreeSetPagerFlags(pTemp, PAGER_SYNCHRONOUS_OFF|PAGER_CACHESPILL); + sqlite3BtreeSetPagerFlags(pTemp, pgflags|PAGER_CACHESPILL); /* Begin a transaction and take an exclusive lock on the main database ** file. This is done before the sqlite3BtreeGetPageSize(pMain) call below, @@ -152823,10 +160026,10 @@ SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){ pVTab->nRef--; if( pVTab->nRef==0 ){ sqlite3_vtab *p = pVTab->pVtab; - sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod); if( p ){ p->pModule->xDisconnect(p); } + sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod); sqlite3DbFree(db, pVTab); } } @@ -152927,7 +160130,6 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ if( p ){ db->pDisconnect = 0; - sqlite3ExpirePreparedStatements(db, 0); do { VTable *pNext = p->pNext; sqlite3VtabUnlock(p); @@ -152952,7 +160154,8 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3 *db){ */ SQLITE_PRIVATE void sqlite3VtabClear(sqlite3 *db, Table *p){ assert( IsVirtual(p) ); - if( !db || db->pnBytesFreed==0 ) vtabDisconnectAll(0, p); + assert( db!=0 ); + if( db->pnBytesFreed==0 ) vtabDisconnectAll(0, p); if( p->u.vtab.azArg ){ int i; for(i=0; i<p->u.vtab.nArg; i++){ @@ -153092,7 +160295,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){ ** the information we've collected. ** ** The VM register number pParse->regRowid holds the rowid of an - ** entry in the sqlite_schema table tht was created for this vtab + ** entry in the sqlite_schema table that was created for this vtab ** by sqlite3StartTable().
*/ iDb = sqlite3SchemaToIndex(db, pTab->pSchema); @@ -153221,7 +160424,9 @@ static int vtabCallConstructor( sCtx.pPrior = db->pVtabCtx; sCtx.bDeclared = 0; db->pVtabCtx = &sCtx; + pTab->nTabRef++; rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr); + sqlite3DeleteTable(db, pTab); db->pVtabCtx = sCtx.pPrior; if( rc==SQLITE_NOMEM ) sqlite3OomFault(db); assert( sCtx.pTab==pTab ); @@ -153430,7 +160635,7 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3 *db, const char *zCreateTable){ sqlite3_mutex_enter(db->mutex); pCtx = db->pVtabCtx; if( !pCtx || pCtx->bDeclared ){ - sqlite3Error(db, SQLITE_MISUSE); + sqlite3Error(db, SQLITE_MISUSE_BKPT); sqlite3_mutex_leave(db->mutex); return SQLITE_MISUSE_BKPT; } @@ -153711,7 +160916,10 @@ SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *db, int op, int iSavepoint){ break; } if( xMethod && pVTab->iSavepoint>iSavepoint ){ + u64 savedFlags = (db->flags & SQLITE_Defensive); + db->flags &= ~(u64)SQLITE_Defensive; rc = xMethod(pVTab->pVtab, iSavepoint); + db->flags |= savedFlags; } sqlite3VtabUnlock(pVTab); } @@ -153752,7 +160960,7 @@ SQLITE_PRIVATE FuncDef *sqlite3VtabOverloadFunction( if( pExpr->op!=TK_COLUMN ) return pDef; assert( ExprUseYTab(pExpr) ); pTab = pExpr->y.pTab; - if( pTab==0 ) return pDef; + if( NEVER(pTab==0) ) return pDef; if( !IsVirtual(pTab) ) return pDef; pVtab = sqlite3GetVTable(db, pTab)->pVtab; assert( pVtab!=0 ); @@ -153831,7 +161039,7 @@ SQLITE_PRIVATE void sqlite3VtabMakeWritable(Parse *pParse, Table *pTab){ ** ** An eponymous virtual table instance is one that is named after its ** module, and more importantly, does not require a CREATE VIRTUAL TABLE -** statement in order to come into existance. Eponymous virtual table +** statement in order to come into existence. Eponymous virtual table ** instances always exist. They cannot be DROP-ed. ** ** Any virtual table module for which xConnect and xCreate are the same @@ -153940,6 +161148,10 @@ SQLITE_API int sqlite3_vtab_config(sqlite3 *db, int op, ...){ p->pVTable->eVtabRisk = SQLITE_VTABRISK_High; break; } + case SQLITE_VTAB_USES_ALL_SCHEMAS: { + p->pVTable->bAllSchemas = 1; + break; + } default: { rc = SQLITE_MISUSE_BKPT; break; @@ -154018,7 +161230,7 @@ typedef struct WhereRightJoin WhereRightJoin; /* ** This object is a header on a block of allocated memory that will be -** automatically freed when its WInfo oject is destructed. +** automatically freed when its WInfo object is destructed. */ struct WhereMemBlock { WhereMemBlock *pNext; /* Next block in the chain */ @@ -154079,7 +161291,7 @@ struct WhereLevel { int iCur; /* The VDBE cursor used by this IN operator */ int addrInTop; /* Top of the IN loop */ int iBase; /* Base register of multi-key index record */ - int nPrefix; /* Number of prior entires in the key */ + int nPrefix; /* Number of prior entries in the key */ u8 eEndLoopOp; /* IN Loop terminator. OP_Next or OP_Prev */ } *aInLoop; /* Information about each nested IN operator */ } in; /* Used when pWLoop->wsFlags&WHERE_IN_ABLE */ @@ -154329,7 +161541,7 @@ struct WhereClause { int nTerm; /* Number of terms */ int nSlot; /* Number of entries in a[] */ int nBase; /* Number of terms through the last non-Virtual */ - WhereTerm *a; /* Each a[] describes a term of the WHERE cluase */ + WhereTerm *a; /* Each a[] describes a term of the WHERE clause */ #if defined(SQLITE_SMALL_STACK) WhereTerm aStatic[1]; /* Initial static space for a[] */ #else @@ -154359,7 +161571,7 @@ struct WhereAndInfo { ** between VDBE cursor numbers and bits of the bitmasks in WhereTerm. 
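**
** As a rough sketch of the idea (not the exact declaration used here),
** the mapping is a small array searched linearly, where bit N of a
** bitmask stands for the cursor stored in slot N:
**
**     struct CursorToBitmask {
**       int n;        /* Number of cursors assigned so far */
**       int ix[64];   /* ix[N] = VDBE cursor number for bit N */
**     };
**     /* lookup: for(i=0; i<n; i++) if( ix[i]==iCur ) return 1ull<<i; */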
** ** The VDBE cursor numbers are small integers contained in -** SrcList_item.iCursor and Expr.iTable fields. For any given WHERE +** SrcItem.iCursor and Expr.iTable fields. For any given WHERE ** clause, the cursor numbers might not begin with 0 and they might ** contain gaps in the numbering sequence. But we want to make maximum ** use of the bits in our bitmasks. This structure provides a mapping @@ -154430,20 +161642,6 @@ struct WhereLoopBuilder { # define SQLITE_QUERY_PLANNER_LIMIT_INCR 1000 #endif -/* -** Each instance of this object records a change to a single node -** in an expression tree to cause that node to point to a column -** of an index rather than an expression or a virtual column. All -** such transformations need to be undone at the end of WHERE clause -** processing. -*/ -typedef struct WhereExprMod WhereExprMod; -struct WhereExprMod { - WhereExprMod *pNext; /* Next translation on a list of them all */ - Expr *pExpr; /* The Expr node that was transformed */ - Expr orig; /* Original value of the Expr node */ -}; - /* ** The WHERE clause processing routine has two halves. The ** first part does the start of the WHERE loop and the second @@ -154459,10 +161657,10 @@ struct WhereInfo { SrcList *pTabList; /* List of tables in the join */ ExprList *pOrderBy; /* The ORDER BY clause or NULL */ ExprList *pResultSet; /* Result set of the query */ +#if WHERETRACE_ENABLED Expr *pWhere; /* The complete WHERE clause */ -#ifndef SQLITE_OMIT_VIRTUALTABLE - Select *pLimit; /* Used to access LIMIT expr/registers for vtabs */ #endif + Select *pSelect; /* The entire SELECT statement containing WHERE */ int aiCurOnePass[2]; /* OP_OpenWrite cursors for the ONEPASS opt */ int iContinue; /* Jump here to continue with next record */ int iBreak; /* Jump here to break out of the loop */ @@ -154481,7 +161679,6 @@ struct WhereInfo { int iTop; /* The very beginning of the WHERE loop */ int iEndWhere; /* End of the WHERE clause itself */ WhereLoop *pLoops; /* List of all WhereLoop objects */ - WhereExprMod *pExprMods; /* Expression modifications */ WhereMemBlock *pMemToFree;/* Memory to free when this object destroyed */ Bitmask revMask; /* Mask of ORDER BY terms that need reversing */ WhereClause sWC; /* Decomposition of the WHERE clause */ @@ -154498,7 +161695,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereGetMask(WhereMaskSet*,int); #ifdef WHERETRACE_ENABLED SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC); SQLITE_PRIVATE void sqlite3WhereTermPrint(WhereTerm *pTerm, int iTerm); -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC); +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC); #endif SQLITE_PRIVATE WhereTerm *sqlite3WhereFindTerm( WhereClause *pWC, /* The WHERE clause to be searched */ @@ -154629,6 +161826,8 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*); #define WHERE_BLOOMFILTER 0x00400000 /* Consider using a Bloom-filter */ #define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */ #define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */ + /* 0x02000000 -- available for reuse */ +#define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */ #endif /* !defined(SQLITE_WHEREINT_H) */ @@ -154726,9 +161925,9 @@ static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){ /* ** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN -** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was -** defined at compile-time. 
If it is not a no-op, a single OP_Explain opcode -** is added to the output to describe the table scan strategy in pLevel. +** command, or if stmt_scanstatus_v2() stats are enabled, or if SQLITE_DEBUG +** was defined at compile-time. If it is not a no-op, a single OP_Explain +** opcode is added to the output to describe the table scan strategy in pLevel. ** ** If an OP_Explain opcode is added to the VM, its address is returned. ** Otherwise, if no OP_Explain is coded, zero is returned. @@ -154740,8 +161939,8 @@ SQLITE_PRIVATE int sqlite3WhereExplainOneScan( u16 wctrlFlags /* Flags passed to sqlite3WhereBegin() */ ){ int ret = 0; -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS) - if( sqlite3ParseToplevel(pParse)->explain==2 ) +#if !defined(SQLITE_DEBUG) + if( sqlite3ParseToplevel(pParse)->explain==2 || IS_STMT_SCANSTATUS(pParse->db) ) #endif { SrcItem *pItem = &pTabList->a[pLevel->iFrom]; @@ -154885,6 +162084,8 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter( zMsg = sqlite3StrAccumFinish(&str); ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v), pParse->addrExplain, 0, zMsg,P4_DYNAMIC); + + sqlite3VdbeScanStatus(v, sqlite3VdbeCurrentAddr(v)-1, 0, 0, 0, 0); return ret; } #endif /* SQLITE_OMIT_EXPLAIN */ @@ -154905,16 +162106,37 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus( WhereLevel *pLvl, /* Level to add scanstatus() entry for */ int addrExplain /* Address of OP_Explain (or 0) */ ){ - const char *zObj = 0; - WhereLoop *pLoop = pLvl->pWLoop; - if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ - zObj = pLoop->u.btree.pIndex->zName; - }else{ - zObj = pSrclist->a[pLvl->iFrom].zName; + if( IS_STMT_SCANSTATUS( sqlite3VdbeDb(v) ) ){ + const char *zObj = 0; + WhereLoop *pLoop = pLvl->pWLoop; + int wsFlags = pLoop->wsFlags; + int viaCoroutine = 0; + + if( (wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){ + zObj = pLoop->u.btree.pIndex->zName; + }else{ + zObj = pSrclist->a[pLvl->iFrom].zName; + viaCoroutine = pSrclist->a[pLvl->iFrom].fg.viaCoroutine; + } + sqlite3VdbeScanStatus( + v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj + ); + + if( viaCoroutine==0 ){ + if( (wsFlags & (WHERE_MULTI_OR|WHERE_AUTO_INDEX))==0 ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iTabCur); + } + if( wsFlags & WHERE_INDEXED ){ + sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur); + } + }else{ + int addr = pSrclist->a[pLvl->iFrom].addrFillSub; + VdbeOp *pOp = sqlite3VdbeGetOp(v, addr-1); + assert( sqlite3VdbeDb(v)->mallocFailed || pOp->opcode==OP_InitCoroutine ); + assert( sqlite3VdbeDb(v)->mallocFailed || pOp->p2>addr ); + sqlite3VdbeScanStatusRange(v, addrExplain, addr, pOp->p2-1); + } } - sqlite3VdbeScanStatus( - v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj - ); } #endif @@ -154974,7 +162196,7 @@ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){ pTerm->wtFlags |= TERM_CODED; } #ifdef WHERETRACE_ENABLED - if( sqlite3WhereTrace & 0x20000 ){ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ sqlite3DebugPrintf("DISABLE-"); sqlite3WhereTermPrint(pTerm, (int)(pTerm - (pTerm->pWC->a))); } @@ -155089,68 +162311,75 @@ static Expr *removeUnindexableInClauseTerms( Expr *pX /* The IN expression to be reduced */ ){ sqlite3 *db = pParse->db; + Select *pSelect; /* Pointer to the SELECT on the RHS */ Expr *pNew; pNew = sqlite3ExprDup(db, pX, 0); if( db->mallocFailed==0 ){ - ExprList *pOrigRhs; /* Original unmodified RHS */ - ExprList *pOrigLhs; /* Original unmodified LHS */ - 
ExprList *pRhs = 0; /* New RHS after modifications */ - ExprList *pLhs = 0; /* New LHS after mods */ - int i; /* Loop counter */ - Select *pSelect; /* Pointer to the SELECT on the RHS */ - - assert( ExprUseXSelect(pNew) ); - pOrigRhs = pNew->x.pSelect->pEList; - assert( pNew->pLeft!=0 ); - assert( ExprUseXList(pNew->pLeft) ); - pOrigLhs = pNew->pLeft->x.pList; - for(i=iEq; i<pLoop->nLTerm; i++){ - if( pLoop->aLTerm[i]->pExpr==pX ){ - int iField; - assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); - iField = pLoop->aLTerm[i]->u.x.iField - 1; - if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ - pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); - pOrigRhs->a[iField].pExpr = 0; - assert( pOrigLhs->a[iField].pExpr!=0 ); - pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr); - pOrigLhs->a[iField].pExpr = 0; - } - } - sqlite3ExprListDelete(db, pOrigRhs); - sqlite3ExprListDelete(db, pOrigLhs); - pNew->pLeft->x.pList = pLhs; - pNew->x.pSelect->pEList = pRhs; - if( pLhs && pLhs->nExpr==1 ){ - /* Take care here not to generate a TK_VECTOR containing only a - ** single value. Since the parser never creates such a vector, some - ** of the subroutines do not handle this case. */ - Expr *p = pLhs->a[0].pExpr; - pLhs->a[0].pExpr = 0; - sqlite3ExprDelete(db, pNew->pLeft); - pNew->pLeft = p; - } - pSelect = pNew->x.pSelect; - if( pSelect->pOrderBy ){ - /* If the SELECT statement has an ORDER BY clause, zero the - ** iOrderByCol variables. These are set to non-zero when an - ** ORDER BY term exactly matches one of the terms of the - ** result-set. Since the result-set of the SELECT statement may - ** have been modified or reordered, these variables are no longer - ** set correctly. Since setting them is just an optimization, - ** it's easiest just to zero them here. */ - ExprList *pOrderBy = pSelect->pOrderBy; - for(i=0; i<pOrderBy->nExpr; i++){ - pOrderBy->a[i].u.x.iOrderByCol = 0; + for(pSelect=pNew->x.pSelect; pSelect; pSelect=pSelect->pPrior){ + ExprList *pOrigRhs; /* Original unmodified RHS */ + ExprList *pOrigLhs = 0; /* Original unmodified LHS */ + ExprList *pRhs = 0; /* New RHS after modifications */ + ExprList *pLhs = 0; /* New LHS after mods */ + int i; /* Loop counter */ + + assert( ExprUseXSelect(pNew) ); + pOrigRhs = pSelect->pEList; + assert( pNew->pLeft!=0 ); + assert( ExprUseXList(pNew->pLeft) ); + if( pSelect==pNew->x.pSelect ){ + pOrigLhs = pNew->pLeft->x.pList; + } + for(i=iEq; i<pLoop->nLTerm; i++){ + if( pLoop->aLTerm[i]->pExpr==pX ){ + int iField; + assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 ); + iField = pLoop->aLTerm[i]->u.x.iField - 1; + if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */ + pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr); + pOrigRhs->a[iField].pExpr = 0; + if( pOrigLhs ){ + assert( pOrigLhs->a[iField].pExpr!=0 ); + pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr); + pOrigLhs->a[iField].pExpr = 0; + } + } + } + sqlite3ExprListDelete(db, pOrigRhs); + if( pOrigLhs ){ + sqlite3ExprListDelete(db, pOrigLhs); + pNew->pLeft->x.pList = pLhs; + } + pSelect->pEList = pRhs; + if( pLhs && pLhs->nExpr==1 ){ + /* Take care here not to generate a TK_VECTOR containing only a + ** single value. Since the parser never creates such a vector, some + ** of the subroutines do not handle this case.
*/ + Expr *p = pLhs->a[0].pExpr; + pLhs->a[0].pExpr = 0; + sqlite3ExprDelete(db, pNew->pLeft); + pNew->pLeft = p; + } + if( pSelect->pOrderBy ){ + /* If the SELECT statement has an ORDER BY clause, zero the + ** iOrderByCol variables. These are set to non-zero when an + ** ORDER BY term exactly matches one of the terms of the + ** result-set. Since the result-set of the SELECT statement may + ** have been modified or reordered, these variables are no longer + ** set correctly. Since setting them is just an optimization, + ** it's easiest just to zero them here. */ + ExprList *pOrderBy = pSelect->pOrderBy; + for(i=0; inExpr; i++){ + pOrderBy->a[i].u.x.iOrderByCol = 0; + } } - } #if 0 - printf("For indexing, change the IN expr:\n"); - sqlite3TreeViewExpr(0, pX, 0); - printf("Into:\n"); - sqlite3TreeViewExpr(0, pNew, 0); + printf("For indexing, change the IN expr:\n"); + sqlite3TreeViewExpr(0, pX, 0); + printf("Into:\n"); + sqlite3TreeViewExpr(0, pNew, 0); #endif + } } return pNew; } @@ -155403,7 +162632,7 @@ static int codeAllEqualityTerms( /* Figure out how many memory cells we will need then allocate them. */ regBase = pParse->nMem + 1; - nReg = pLoop->u.btree.nEq + nExtraReg; + nReg = nEq + nExtraReg; pParse->nMem += nReg; zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx)); @@ -155450,9 +162679,6 @@ static int codeAllEqualityTerms( sqlite3VdbeAddOp2(v, OP_Copy, r1, regBase+j); } } - } - for(j=nSkip; jaLTerm[j]; if( pTerm->eOperator & WO_IN ){ if( pTerm->pExpr->flags & EP_xIsSelect ){ /* No affinity ever needs to be (or should be) applied to a value @@ -155508,7 +162734,7 @@ static void whereLikeOptimizationStringFixup( if( pTerm->wtFlags & TERM_LIKEOPT ){ VdbeOp *pOp; assert( pLevel->iLikeRepCntr>0 ); - pOp = sqlite3VdbeGetOp(v, -1); + pOp = sqlite3VdbeGetLastOp(v); assert( pOp!=0 ); assert( pOp->opcode==OP_String8 || pTerm->pWC->pWInfo->pParse->db->mallocFailed ); @@ -155595,18 +162821,19 @@ static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){ ** 2) transform the expression node to a TK_REGISTER node that reads ** from the newly populated register. ** -** Also, if the node is a TK_COLUMN that does access the table idenified +** Also, if the node is a TK_COLUMN that does access the table identified ** by pCCurHint.iTabCur, and an index is being used (which we will ** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into ** an access of the index rather than the original table. */ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ int rc = WRC_Continue; + int reg; struct CCurHint *pHint = pWalker->u.pCCurHint; if( pExpr->op==TK_COLUMN ){ if( pExpr->iTable!=pHint->iTabCur ){ - int reg = ++pWalker->pParse->nMem; /* Register for column value */ - sqlite3ExprCode(pWalker->pParse, pExpr, reg); + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); pExpr->op = TK_REGISTER; pExpr->iTable = reg; }else if( pHint->pIdx!=0 ){ @@ -155614,15 +162841,15 @@ static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){ pExpr->iColumn = sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn); assert( pExpr->iColumn>=0 ); } - }else if( pExpr->op==TK_AGG_FUNCTION ){ - /* An aggregate function in the WHERE clause of a query means this must - ** be a correlated sub-query, and expression pExpr is an aggregate from - ** the parent context. Do not walk the function arguments in this case. 
- ** - ** todo: It should be possible to replace this node with a TK_REGISTER - ** expression, as the result of the expression must be stored in a - ** register at this point. The same holds for TK_AGG_COLUMN nodes. */ + }else if( pExpr->pAggInfo ){ rc = WRC_Prune; + reg = ++pWalker->pParse->nMem; /* Register for column value */ + reg = sqlite3ExprCodeTarget(pWalker->pParse, pExpr, reg); + pExpr->op = TK_REGISTER; + pExpr->iTable = reg; + }else if( pExpr->op==TK_TRUEFALSE ){ + /* Do not walk disabled expressions. tag-20230504-1 */ + return WRC_Prune; } return rc; } @@ -155724,7 +162951,7 @@ static void codeCursorHint( } if( pExpr!=0 ){ sWalker.xExprCallback = codeCursorHintFixExpr; - sqlite3WalkExpr(&sWalker, pExpr); + if( pParse->nErr==0 ) sqlite3WalkExpr(&sWalker, pExpr); sqlite3VdbeAddOp4(v, OP_CursorHint, (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0, (const char*)pExpr, P4_EXPR); @@ -155832,143 +163059,6 @@ static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){ } } -/* An instance of the IdxExprTrans object carries information about a -** mapping from an expression on table columns into a column in an index -** down through the Walker. -*/ -typedef struct IdxExprTrans { - Expr *pIdxExpr; /* The index expression */ - int iTabCur; /* The cursor of the corresponding table */ - int iIdxCur; /* The cursor for the index */ - int iIdxCol; /* The column for the index */ - int iTabCol; /* The column for the table */ - WhereInfo *pWInfo; /* Complete WHERE clause information */ - sqlite3 *db; /* Database connection (for malloc()) */ -} IdxExprTrans; - -/* -** Preserve pExpr on the WhereETrans list of the WhereInfo. -*/ -static void preserveExpr(IdxExprTrans *pTrans, Expr *pExpr){ - WhereExprMod *pNew; - pNew = sqlite3DbMallocRaw(pTrans->db, sizeof(*pNew)); - if( pNew==0 ) return; - pNew->pNext = pTrans->pWInfo->pExprMods; - pTrans->pWInfo->pExprMods = pNew; - pNew->pExpr = pExpr; - memcpy(&pNew->orig, pExpr, sizeof(*pExpr)); -} - -/* The walker node callback used to transform matching expressions into -** a reference to an index column for an index on an expression. -** -** If pExpr matches, then transform it into a reference to the index column -** that contains the value of pExpr. -*/ -static int whereIndexExprTransNode(Walker *p, Expr *pExpr){ - IdxExprTrans *pX = p->u.pIdxTrans; - if( sqlite3ExprCompare(0, pExpr, pX->pIdxExpr, pX->iTabCur)==0 ){ - pExpr = sqlite3ExprSkipCollate(pExpr); - preserveExpr(pX, pExpr); - pExpr->affExpr = sqlite3ExprAffinity(pExpr); - pExpr->op = TK_COLUMN; - pExpr->iTable = pX->iIdxCur; - pExpr->iColumn = pX->iIdxCol; - testcase( ExprHasProperty(pExpr, EP_Unlikely) ); - ExprClearProperty(pExpr, EP_Skip|EP_Unlikely|EP_WinFunc|EP_Subrtn); - pExpr->y.pTab = 0; - return WRC_Prune; - }else{ - return WRC_Continue; - } -} - -#ifndef SQLITE_OMIT_GENERATED_COLUMNS -/* A walker node callback that translates a column reference to a table -** into a corresponding column reference of an index. 
-*/ -static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){ - if( pExpr->op==TK_COLUMN ){ - IdxExprTrans *pX = p->u.pIdxTrans; - if( pExpr->iTable==pX->iTabCur && pExpr->iColumn==pX->iTabCol ){ - assert( ExprUseYTab(pExpr) && pExpr->y.pTab!=0 ); - preserveExpr(pX, pExpr); - pExpr->affExpr = sqlite3TableColumnAffinity(pExpr->y.pTab,pExpr->iColumn); - pExpr->iTable = pX->iIdxCur; - pExpr->iColumn = pX->iIdxCol; - pExpr->y.pTab = 0; - } - } - return WRC_Continue; -} -#endif /* SQLITE_OMIT_GENERATED_COLUMNS */ - -/* -** For an indexes on expression X, locate every instance of expression X -** in pExpr and change that subexpression into a reference to the appropriate -** column of the index. -** -** 2019-10-24: Updated to also translate references to a VIRTUAL column in -** the table into references to the corresponding (stored) column of the -** index. -*/ -static void whereIndexExprTrans( - Index *pIdx, /* The Index */ - int iTabCur, /* Cursor of the table that is being indexed */ - int iIdxCur, /* Cursor of the index itself */ - WhereInfo *pWInfo /* Transform expressions in this WHERE clause */ -){ - int iIdxCol; /* Column number of the index */ - ExprList *aColExpr; /* Expressions that are indexed */ - Table *pTab; - Walker w; - IdxExprTrans x; - aColExpr = pIdx->aColExpr; - if( aColExpr==0 && !pIdx->bHasVCol ){ - /* The index does not reference any expressions or virtual columns - ** so no translations are needed. */ - return; - } - pTab = pIdx->pTable; - memset(&w, 0, sizeof(w)); - w.u.pIdxTrans = &x; - x.iTabCur = iTabCur; - x.iIdxCur = iIdxCur; - x.pWInfo = pWInfo; - x.db = pWInfo->pParse->db; - for(iIdxCol=0; iIdxCol<pIdx->nColumn; iIdxCol++){ - i16 iRef = pIdx->aiColumn[iIdxCol]; - if( iRef==XN_EXPR ){ - assert( aColExpr!=0 && aColExpr->a[iIdxCol].pExpr!=0 ); - x.pIdxExpr = aColExpr->a[iIdxCol].pExpr; - if( sqlite3ExprIsConstant(x.pIdxExpr) ) continue; - w.xExprCallback = whereIndexExprTransNode; -#ifndef SQLITE_OMIT_GENERATED_COLUMNS - }else if( iRef>=0 - && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0 - && ((pTab->aCol[iRef].colFlags & COLFLAG_HASCOLL)==0 - || sqlite3StrICmp(sqlite3ColumnColl(&pTab->aCol[iRef]), - sqlite3StrBINARY)==0) - ){ - /* Check to see if there are direct references to generated columns - ** that are contained in the index. Pulling the generated column - ** out of the index is an optimization only - the main table is always - ** available if the index cannot be used. To avoid unnecessary - ** complication, omit this optimization if the collating sequence for - ** the column is non-standard */ - x.iTabCol = iRef; - w.xExprCallback = whereIndexExprTransColumn; -#endif /* SQLITE_OMIT_GENERATED_COLUMNS */ - }else{ - continue; - } - x.iIdxCol = iIdxCol; - sqlite3WalkExpr(&w, pWInfo->pWhere); - sqlite3WalkExprList(&w, pWInfo->pOrderBy); - sqlite3WalkExprList(&w, pWInfo->pResultSet); - } -} - /* ** The pTruth expression is always true because it is the WHERE clause ** of a partial index that is driving a query loop.
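**
** For example (a sketch, assuming this schema):
**
**     CREATE INDEX t1a ON t1(a) WHERE b=1;
**     SELECT * FROM t1 WHERE a=5 AND b=1;
**
** Once the planner has chosen t1a to drive the loop, every visited row
** already satisfies b=1, so that WHERE clause test is redundant and can
** be marked as coded.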
Look through all of the @@ -156037,6 +163127,8 @@ static SQLITE_NOINLINE void filterPullDown( testcase( pTerm->wtFlags & TERM_VIRTUAL ); regRowid = sqlite3GetTempReg(pParse); regRowid = codeEqualityTerm(pParse, pTerm, pLevel, 0, 0, regRowid); + sqlite3VdbeAddOp2(pParse->pVdbe, OP_MustBeInt, regRowid, addrNxt); + VdbeCoverage(pParse->pVdbe); sqlite3VdbeAddOp4Int(pParse->pVdbe, OP_Filter, pLevel->regFilter, addrNxt, regRowid, 1); VdbeCoverage(pParse->pVdbe); @@ -156096,13 +163188,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur); bRev = (pWInfo->revMask>>iLevel)&1; VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName)); -#if WHERETRACE_ENABLED /* 0x20800 */ - if( sqlite3WhereTrace & 0x800 ){ +#if WHERETRACE_ENABLED /* 0x4001 */ + if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n", iLevel, pWInfo->nLevel, (u64)notReady, pLevel->iFrom); - sqlite3WhereLoopPrint(pLoop, pWC); + if( sqlite3WhereTrace & 0x1000 ){ + sqlite3WhereLoopPrint(pLoop, pWC); + } } - if( sqlite3WhereTrace & 0x20000 ){ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ if( iLevel==0 ){ sqlite3DebugPrintf("WHERE clause being coded:\n"); sqlite3TreeViewExpr(0, pWInfo->pWhere, 0); @@ -156188,9 +163282,9 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( && pLoop->u.vtab.bOmitOffset ){ assert( pTerm->eOperator==WO_AUX ); - assert( pWInfo->pLimit!=0 ); - assert( pWInfo->pLimit->iOffset>0 ); - sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pLimit->iOffset); + assert( pWInfo->pSelect!=0 ); + assert( pWInfo->pSelect->iOffset>0 ); + sqlite3VdbeAddOp2(v, OP_Integer, 0, pWInfo->pSelect->iOffset); VdbeComment((v,"Zero OFFSET counter")); } } @@ -156298,6 +163392,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg); addrNxt = pLevel->addrNxt; if( pLevel->regFilter ){ + sqlite3VdbeAddOp2(v, OP_MustBeInt, iRowidReg, addrNxt); + VdbeCoverage(v); sqlite3VdbeAddOp4Int(v, OP_Filter, pLevel->regFilter, addrNxt, iRowidReg, 1); VdbeCoverage(v); @@ -156343,7 +163439,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( }; assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */ assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */ - assert( TK_GE==TK_GT+3 ); /* ... is correcct. */ + assert( TK_GE==TK_GT+3 ); /* ... is correct. */ assert( (pStart->wtFlags & TERM_VNULL)==0 ); testcase( pStart->wtFlags & TERM_VIRTUAL ); @@ -156649,6 +163745,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( ** guess. */ addrSeekScan = sqlite3VdbeAddOp1(v, OP_SeekScan, (pIdx->aiRowLogEst[0]+9)/10); + if( pRangeStart || pRangeEnd ){ + sqlite3VdbeChangeP5(v, 1); + sqlite3VdbeChangeP2(v, addrSeekScan, sqlite3VdbeCurrentAddr(v)+1); + addrSeekScan = 0; + } VdbeCoverage(v); } sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint); @@ -156685,16 +163786,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( assert( pLevel->p2==0 ); if( pRangeEnd ){ Expr *pRight = pRangeEnd->pExpr->pRight; - if( addrSeekScan ){ - /* For a seek-scan that has a range on the lowest term of the index, - ** we have to make the top of the loop be code that sets the end - ** condition of the range. Otherwise, the OP_SeekScan might jump - ** over that initialization, leaving the range-end value set to the - ** range-start value, resulting in a wrong answer. - ** See ticket 5981a8c041a3c2f3 (2021-11-02). 
- */ - pLevel->p2 = sqlite3VdbeCurrentAddr(v); - } + assert( addrSeekScan==0 ); codeExprOrVector(pParse, pRight, regBase+nEq, nTop); whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd); if( (pRangeEnd->wtFlags & TERM_VNULL)==0 @@ -156724,11 +163816,11 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } nConstraint++; } - sqlite3DbFree(db, zStartAff); - sqlite3DbFree(db, zEndAff); + if( zStartAff ) sqlite3DbNNFreeNN(db, zStartAff); + if( zEndAff ) sqlite3DbNNFreeNN(db, zEndAff); /* Top of the loop body */ - if( pLevel->p2==0 ) pLevel->p2 = sqlite3VdbeCurrentAddr(v); + pLevel->p2 = sqlite3VdbeCurrentAddr(v); /* Check if the index cursor is past the end of the range. */ if( nConstraint ){ @@ -156787,27 +163879,6 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } if( pLevel->iLeftJoin==0 ){ - /* If pIdx is an index on one or more expressions, then look through - ** all the expressions in pWInfo and try to transform matching expressions - ** into reference to index columns. Also attempt to translate references - ** to virtual columns in the table into references to (stored) columns - ** of the index. - ** - ** Do not do this for the RHS of a LEFT JOIN. This is because the - ** expression may be evaluated after OP_NullRow has been executed on - ** the cursor. In this case it is important to do the full evaluation, - ** as the result of the expression may not be NULL, even if all table - ** column values are. https://www.sqlite.org/src/info/7fa8049685b50b5a - ** - ** Also, do not do this when processing one index an a multi-index - ** OR clause, since the transformation will become invalid once we - ** move forward to the next index. - ** https://sqlite.org/src/info/4e8e4857d32d401f - */ - if( (pWInfo->wctrlFlags & (WHERE_OR_SUBCLAUSE|WHERE_RIGHT_JOIN))==0 ){ - whereIndexExprTrans(pIdx, iCur, iIdxCur, pWInfo); - } - /* If a partial index is driving the loop, try to eliminate WHERE clause ** terms from the query that must be true due to the WHERE clause of ** the partial index. @@ -156920,7 +163991,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( int nNotReady; /* The number of notReady tables */ SrcItem *origSrc; /* Original list of tables */ nNotReady = pWInfo->nLevel - iLevel - 1; - pOrTab = sqlite3StackAllocRaw(db, + pOrTab = sqlite3DbMallocRawNN(db, sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0])); if( pOrTab==0 ) return notReady; pOrTab->nAlloc = (u8)(nNotReady + 1); @@ -157040,7 +164111,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } /* Loop through table entries that match term pOrTerm. 
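**
** This is the multi-index OR strategy: each disjunct gets its own index
** scan and the resulting rowids are merged. A query shaped like the
** following sketch (assuming separate indexes on t1.a and t1.b) is a
** candidate:
**
**     SELECT * FROM t1 WHERE a=10 OR b=20;
**
** and each disjunct shows up as its own "INDEX %d" subplan in the
** EXPLAIN QUERY PLAN output generated just below.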
*/ ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1)); - WHERETRACE(0xffff, ("Subplan for OR-clause:\n")); + WHERETRACE(0xffffffff, ("Subplan for OR-clause:\n")); pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, 0, WHERE_OR_SUBCLAUSE, iCovCur); assert( pSubWInfo || pParse->nErr ); @@ -157173,7 +164244,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( assert( pLevel->op==OP_Return ); pLevel->p2 = sqlite3VdbeCurrentAddr(v); - if( pWInfo->nLevel>1 ){ sqlite3StackFree(db, pOrTab); } + if( pWInfo->nLevel>1 ){ sqlite3DbFreeNN(db, pOrTab); } if( !untestedTerms ) disableTerm(pLevel, pTerm); }else #endif /* SQLITE_OMIT_OR_OPTIMIZATION */ @@ -157277,12 +164348,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } #endif } -#ifdef WHERETRACE_ENABLED /* 0xffff */ +#ifdef WHERETRACE_ENABLED /* 0xffffffff */ if( sqlite3WhereTrace ){ VdbeNoopComment((v, "WhereTerm[%d] (%p) priority=%d", pWC->nTerm-j, pTerm, iLoop)); } - if( sqlite3WhereTrace & 0x800 ){ + if( sqlite3WhereTrace & 0x4000 ){ sqlite3DebugPrintf("Coding auxiliary constraint:\n"); sqlite3WhereTermPrint(pTerm, pWC->nTerm-j); } @@ -157311,8 +164382,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( if( pTerm->leftCursor!=iCur ) continue; if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ) continue; pE = pTerm->pExpr; -#ifdef WHERETRACE_ENABLED /* 0x800 */ - if( sqlite3WhereTrace & 0x800 ){ +#ifdef WHERETRACE_ENABLED /* 0x4001 */ + if( (sqlite3WhereTrace & 0x4001)==0x4001 ){ sqlite3DebugPrintf("Coding transitive constraint:\n"); sqlite3WhereTermPrint(pTerm, pWC->nTerm-j); } @@ -157427,13 +164498,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart( } } -#if WHERETRACE_ENABLED /* 0x20800 */ - if( sqlite3WhereTrace & 0x20000 ){ +#if WHERETRACE_ENABLED /* 0x4001 */ + if( sqlite3WhereTrace & 0x4000 ){ sqlite3DebugPrintf("All WHERE-clause terms after coding level %d:\n", iLevel); sqlite3WhereClausePrint(pWC); } - if( sqlite3WhereTrace & 0x800 ){ + if( sqlite3WhereTrace & 0x1 ){ sqlite3DebugPrintf("End Coding level %d: notReady=%llx\n", iLevel, (u64)pLevel->notReady); } @@ -157548,7 +164619,7 @@ SQLITE_PRIVATE SQLITE_NOINLINE void sqlite3WhereRightJoinLoop( ** the WHERE clause of SQL statements. ** ** This file was originally part of where.c but was split out to improve -** readability and editabiliity. This file contains utility routines for +** readability and editability. This file contains utility routines for ** analyzing Expr objects in the WHERE clause. */ /* #include "sqliteInt.h" */ @@ -157764,7 +164835,7 @@ static int isLikeOrGlob( ** range search. The third is because the caller assumes that the pattern ** consists of at least one character after all escapes have been ** removed. 
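**
** When the prefix passes these checks, the LIKE/GLOB term is
** supplemented by virtual range terms. Roughly (a sketch):
**
**     x LIKE 'abc%'
**
** also implies
**
**     x >= 'abc' AND x < 'abd'
**
** which lets an index on x narrow the scan; the original LIKE test is
** still applied to each candidate row.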
*/ - if( cnt!=0 && 255!=(u8)z[cnt-1] && (cnt>1 || z[0]!=wc[3]) ){ + if( (cnt>1 || (cnt>0 && z[0]!=wc[3])) && 255!=(u8)z[cnt-1] ){ Expr *pPrefix; /* A "complete" match if the pattern ends with "*" or "%" */ @@ -157801,7 +164872,7 @@ static int isLikeOrGlob( if( pLeft->op!=TK_COLUMN || sqlite3ExprAffinity(pLeft)!=SQLITE_AFF_TEXT || (ALWAYS( ExprUseYTab(pLeft) ) - && pLeft->y.pTab + && ALWAYS(pLeft->y.pTab) && IsVirtual(pLeft->y.pTab)) /* Might be numeric */ ){ int isNum; @@ -157918,8 +164989,7 @@ static int isAuxiliaryVtabOperator( ** MATCH(expression,vtab_column) */ pCol = pList->a[1].pExpr; - assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); - testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); + assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) ); if( ExprIsVtab(pCol) ){ for(i=0; ia[0].pExpr; assert( pCol->op!=TK_COLUMN || ExprUseYTab(pCol) ); - testcase( pCol->op==TK_COLUMN && pCol->y.pTab==0 ); + assert( pCol->op!=TK_COLUMN || (ExprUseYTab(pCol) && pCol->y.pTab!=0) ); if( ExprIsVtab(pCol) ){ sqlite3_vtab *pVtab; sqlite3_module *pMod; @@ -157969,13 +165039,12 @@ static int isAuxiliaryVtabOperator( int res = 0; Expr *pLeft = pExpr->pLeft; Expr *pRight = pExpr->pRight; - assert( pLeft->op!=TK_COLUMN || ExprUseYTab(pLeft) ); - testcase( pLeft->op==TK_COLUMN && pLeft->y.pTab==0 ); + assert( pLeft->op!=TK_COLUMN || (ExprUseYTab(pLeft) && pLeft->y.pTab!=0) ); if( ExprIsVtab(pLeft) ){ res++; } - assert( pRight==0 || pRight->op!=TK_COLUMN || ExprUseYTab(pRight) ); - testcase( pRight && pRight->op==TK_COLUMN && pRight->y.pTab==0 ); + assert( pRight==0 || pRight->op!=TK_COLUMN + || (ExprUseYTab(pRight) && pRight->y.pTab!=0) ); if( pRight && ExprIsVtab(pRight) ){ res++; SWAP(Expr*, pLeft, pRight); @@ -158339,7 +165408,7 @@ static void exprAnalyzeOrTerm( pOrTerm->leftCursor))==0 ){ /* This term must be of the form t1.a==t2.b where t2 is in the ** chngToIN set but t1 is not. This term will be either preceded - ** or follwed by an inverted copy (t2.b==t1.a). Skip this term + ** or followed by an inverted copy (t2.b==t1.a). Skip this term ** and use its inversion. 
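**
** This analysis is what lets a disjunction over a single column, e.g.
** (a sketch):
**
**     x=1 OR x=2 OR x=3
**
** be rewritten as the virtual term
**
**     x IN (1,2,3)
**
** which the planner can satisfy with a single indexed IN-loop.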
*/ testcase( pOrTerm->wtFlags & TERM_COPIED ); testcase( pOrTerm->wtFlags & TERM_VIRTUAL ); @@ -158511,35 +165580,40 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){ */ static SQLITE_NOINLINE int exprMightBeIndexed2( SrcList *pFrom, /* The FROM clause */ - Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */ int *aiCurCol, /* Write the referenced table cursor and column here */ - Expr *pExpr /* An operand of a comparison operator */ + Expr *pExpr, /* An operand of a comparison operator */ + int j /* Start looking with the j-th pFrom entry */ ){ Index *pIdx; int i; int iCur; - for(i=0; mPrereq>1; i++, mPrereq>>=1){} - iCur = pFrom->a[i].iCursor; - for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ - if( pIdx->aColExpr==0 ) continue; - for(i=0; i<pIdx->nKeyCol; i++){ - if( pIdx->aiColumn[i]!=XN_EXPR ) continue; - if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){ - aiCurCol[0] = iCur; - aiCurCol[1] = XN_EXPR; - return 1; + do{ + iCur = pFrom->a[j].iCursor; + for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->aColExpr==0 ) continue; + for(i=0; i<pIdx->nKeyCol; i++){ + if( pIdx->aiColumn[i]!=XN_EXPR ) continue; + assert( pIdx->bHasExpr ); + if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0 + && pExpr->op!=TK_STRING + ){ + aiCurCol[0] = iCur; + aiCurCol[1] = XN_EXPR; + return 1; + } } } - } + }while( ++j < pFrom->nSrc ); return 0; } static int exprMightBeIndexed( SrcList *pFrom, /* The FROM clause */ - Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */ int *aiCurCol, /* Write the referenced table cursor & column here */ Expr *pExpr, /* An operand of a comparison operator */ int op /* The specific comparison operator */ ){ + int i; + /* If this expression is a vector to the left or right of an ** inequality constraint (>, <, >= or <=), perform the processing ** on the first element of the vector.
*/ @@ -158549,7 +165623,6 @@ static int exprMightBeIndexed( if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){ assert( ExprUseXList(pExpr) ); pExpr = pExpr->x.pList->a[0].pExpr; - } if( pExpr->op==TK_COLUMN ){ @@ -158557,9 +165630,16 @@ static int exprMightBeIndexed( aiCurCol[1] = pExpr->iColumn; return 1; } - if( mPrereq==0 ) return 0; /* No table references */ - if( (mPrereq&(mPrereq-1))!=0 ) return 0; /* Refs more than one table */ - return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr); + + for(i=0; inSrc; i++){ + Index *pIdx; + for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){ + if( pIdx->aColExpr ){ + return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i); + } + } + } + return 0; } @@ -158590,8 +165670,8 @@ static void exprAnalyze( WhereTerm *pTerm; /* The term to be analyzed */ WhereMaskSet *pMaskSet; /* Set of table index masks */ Expr *pExpr; /* The expression to be analyzed */ - Bitmask prereqLeft; /* Prerequesites of the pExpr->pLeft */ - Bitmask prereqAll; /* Prerequesites of pExpr */ + Bitmask prereqLeft; /* Prerequisites of the pExpr->pLeft */ + Bitmask prereqAll; /* Prerequisites of pExpr */ Bitmask extraRight = 0; /* Extra dependencies on LEFT JOIN */ Expr *pStr1 = 0; /* RHS of LIKE/GLOB operator */ int isComplete = 0; /* RHS of LIKE/GLOB ends with wildcard */ @@ -158685,7 +165765,7 @@ static void exprAnalyze( pLeft = pLeft->x.pList->a[pTerm->u.x.iField-1].pExpr; } - if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){ + if( exprMightBeIndexed(pSrc, aiCurCol, pLeft, op) ){ pTerm->leftCursor = aiCurCol[0]; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); pTerm->u.x.leftColumn = aiCurCol[1]; @@ -158693,7 +165773,7 @@ static void exprAnalyze( } if( op==TK_IS ) pTerm->wtFlags |= TERM_IS; if( pRight - && exprMightBeIndexed(pSrc, pTerm->prereqRight, aiCurCol, pRight, op) + && exprMightBeIndexed(pSrc, aiCurCol, pRight, op) && !ExprHasProperty(pRight, EP_FixedCol) ){ WhereTerm *pNew; @@ -158737,7 +165817,7 @@ static void exprAnalyze( && 0==sqlite3ExprCanBeNull(pLeft) ){ assert( !ExprHasProperty(pExpr, EP_IntValue) ); - pExpr->op = TK_TRUEFALSE; + pExpr->op = TK_TRUEFALSE; /* See tag-20230504-1 */ pExpr->u.zToken = "false"; ExprSetProperty(pExpr, EP_IsFalse); pTerm->prereqAll = 0; @@ -158904,7 +165984,6 @@ static void exprAnalyze( transferJoinMarkings(pNewExpr1, pExpr); idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags); testcase( idxNew1==0 ); - exprAnalyze(pSrc, pWC, idxNew1); pNewExpr2 = sqlite3ExprDup(db, pLeft, 0); pNewExpr2 = sqlite3PExpr(pParse, TK_LT, sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName), @@ -158912,6 +165991,7 @@ static void exprAnalyze( transferJoinMarkings(pNewExpr2, pExpr); idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags); testcase( idxNew2==0 ); + exprAnalyze(pSrc, pWC, idxNew1); exprAnalyze(pSrc, pWC, idxNew2); pTerm = &pWC->a[idxTerm]; if( isComplete ){ @@ -158968,7 +166048,7 @@ static void exprAnalyze( && pTerm->u.x.iField==0 && pExpr->pLeft->op==TK_VECTOR && ALWAYS( ExprUseXSelect(pExpr) ) - && pExpr->x.pSelect->pPrior==0 + && (pExpr->x.pSelect->pPrior==0 || (pExpr->x.pSelect->selFlags & SF_Values)) #ifndef SQLITE_OMIT_WINDOWFUNC && pExpr->x.pSelect->pWin==0 #endif @@ -159137,9 +166217,9 @@ static void whereAddLimitExpr( ** exist only so that they may be passed to the xBestIndex method of the ** single virtual table in the FROM clause of the SELECT. 
*/ -SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ - assert( p==0 || (p->pGroupBy==0 && (p->selFlags & SF_Aggregate)==0) ); - if( (p && p->pLimit) /* 1 */ +SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ + assert( p!=0 && p->pLimit!=0 ); /* 1 -- checked by caller */ + if( p->pGroupBy==0 && (p->selFlags & (SF_Distinct|SF_Aggregate))==0 /* 2 */ && (p->pSrc->nSrc==1 && IsVirtual(p->pSrc->a[0].pTab)) /* 3 */ ){ @@ -159156,6 +166236,13 @@ SQLITE_PRIVATE void sqlite3WhereAddLimit(WhereClause *pWC, Select *p){ assert( pWC->a[ii].eOperator==WO_ROWVAL ); continue; } + if( pWC->a[ii].nChild ){ + /* If this term has child terms, then they are also part of the + ** pWC->a[] array. So this term can be ignored, as a LIMIT clause + ** will only be added if each of the child terms passes the + ** (leftCursor==iCsr) test below. */ + continue; + } if( pWC->a[ii].leftCursor!=iCsr ) return; } @@ -159375,9 +166462,12 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs( pRhs = sqlite3PExpr(pParse, TK_UPLUS, sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0); pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs); - if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ) ){ + if( pItem->fg.jointype & (JT_LEFT|JT_RIGHT) ){ + testcase( pItem->fg.jointype & JT_LEFT ); /* testtag-20230227a */ + testcase( pItem->fg.jointype & JT_RIGHT ); /* testtag-20230227b */ joinType = EP_OuterON; }else{ + testcase( pItem->fg.jointype & JT_LTORJ ); /* testtag-20230227c */ joinType = EP_InnerON; } sqlite3SetJoinExpr(pTerm, pItem->iCursor, joinType); @@ -159456,7 +166546,7 @@ SQLITE_PRIVATE int sqlite3WhereIsDistinct(WhereInfo *pWInfo){ ** block sorting is required. */ SQLITE_PRIVATE int sqlite3WhereIsOrdered(WhereInfo *pWInfo){ - return pWInfo->nOBSat; + return pWInfo->nOBSat<0 ? 0 : pWInfo->nOBSat; } /* @@ -160067,12 +167157,22 @@ static void translateColumnToCopy( for(; iStart<iEnd; iStart++, pOp++){ if( pOp->p1!=iTabCur ) continue; if( pOp->opcode==OP_Column ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Column to OP_Copy at %d\n", iStart); + } +#endif pOp->opcode = OP_Copy; pOp->p1 = pOp->p2 + iRegister; pOp->p2 = pOp->p3; pOp->p3 = 0; pOp->p5 = 2; /* Cause the MEM_Subtype flag to be cleared */ }else if( pOp->opcode==OP_Rowid ){ +#ifdef SQLITE_DEBUG + if( pParse->db->flags & SQLITE_VdbeAddopTrace ){ + printf("TRANSLATE OP_Rowid to OP_Sequence at %d\n", iStart); + } +#endif pOp->opcode = OP_Sequence; pOp->p1 = iAutoidxCur; #ifdef SQLITE_ALLOW_ROWID_IN_VIEW @@ -160094,7 +167194,7 @@ static void translateColumnToCopy( #if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED) static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ int i; - if( !sqlite3WhereTrace ) return; + if( (sqlite3WhereTrace & 0x10)==0 ) return; for(i=0; i<p->nConstraint; i++){ sqlite3DebugPrintf( " constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n", @@ -160114,7 +167214,7 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){ } static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ int i; - if( !sqlite3WhereTrace ) return; + if( (sqlite3WhereTrace & 0x10)==0 ) return; for(i=0; i<p->nConstraint; i++){ sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n", i, @@ -160132,6 +167232,43 @@ static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){ #define whereTraceIndexInfoOutputs(A) #endif +/* +** We know that pSrc is an operand of an outer join. Return true if +** pTerm is a constraint that is compatible with that join.
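+**
+** The distinction matters because ON and WHERE terms are not
+** interchangeable on an outer join. For example (a sketch):
+**
+**     SELECT * FROM t1 LEFT JOIN t2 ON (t2.x=t1.x AND t2.y=5);
+**     SELECT * FROM t1 LEFT JOIN t2 ON (t2.x=t1.x) WHERE t2.y=5;
+**
+** The first keeps every t1 row, supplying NULLs for unmatched t2
+** columns; the second filters those NULL rows back out. So a term may
+** only constrain the scan of an outer-join operand when it comes from
+** the clause it actually belongs to.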
+** +** pTerm must be EP_OuterON if pSrc is the right operand of an +** outer join. pTerm can be either EP_OuterON or EP_InnerON if pSrc +** is the left operand of a RIGHT join. +** +** See https://sqlite.org/forum/forumpost/206d99a16dd9212f +** for an example of a WHERE clause constraint that may not be used on +** the right table of a RIGHT JOIN because the constraint implies a +** not-NULL condition on the left table of the RIGHT JOIN. +*/ +static int constraintCompatibleWithOuterJoin( + const WhereTerm *pTerm, /* WHERE clause term to check */ + const SrcItem *pSrc /* Table we are trying to access */ +){ + assert( (pSrc->fg.jointype&(JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ); /* By caller */ + testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT ); + testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ ); + testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) ) + testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) ); + if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) + || pTerm->pExpr->w.iJoin != pSrc->iCursor + ){ + return 0; + } + if( (pSrc->fg.jointype & (JT_LEFT|JT_RIGHT))!=0 + && ExprHasProperty(pTerm->pExpr, EP_InnerON) + ){ + return 0; + } + return 1; +} + + + #ifndef SQLITE_OMIT_AUTOMATIC_INDEX /* ** Return TRUE if the WHERE clause term pTerm is of a form where it @@ -160147,16 +167284,10 @@ static int termCanDriveIndex( if( pTerm->leftCursor!=pSrc->iCursor ) return 0; if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) return 0; assert( (pSrc->fg.jointype & JT_RIGHT)==0 ); - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ){ - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT ); - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ ); - testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) ) - testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) ); - if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) - || pTerm->pExpr->w.iJoin != pSrc->iCursor - ){ - return 0; /* See tag-20191211-001 */ - } + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + return 0; /* See https://sqlite.org/forum/forumpost/51e6959f61 */ } if( (pTerm->prereqRight & notReady)!=0 ) return 0; assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); @@ -160170,6 +167301,57 @@ static int termCanDriveIndex( #ifndef SQLITE_OMIT_AUTOMATIC_INDEX + +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS +/* +** Argument pIdx represents an automatic index that the current statement +** will create and populate. Add an OP_Explain with text of the form: +** +** CREATE AUTOMATIC INDEX ON <table>(<cols>) [WHERE <expr>] +** +** This is only required if sqlite3_stmt_scanstatus() is enabled, to +** associate SQLITE_SCANSTAT_NCYCLE and SQLITE_SCANSTAT_NLOOP +** values with it. In order to avoid breaking legacy code and test cases, +** the OP_Explain is not added if this is an EXPLAIN QUERY PLAN command. +*/ +static void explainAutomaticIndex( + Parse *pParse, + Index *pIdx, /* Automatic index to explain */ + int bPartial, /* True if pIdx is a partial index */ + int *pAddrExplain /* OUT: Address of OP_Explain */ +){ + if( IS_STMT_SCANSTATUS(pParse->db) && pParse->explain!=2 ){ + Table *pTab = pIdx->pTable; + const char *zSep = ""; + char *zText = 0; + int ii = 0; + sqlite3_str *pStr = sqlite3_str_new(pParse->db); + sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName); + assert( pIdx->nColumn>1 ); + assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID ); + for(ii=0; ii<(pIdx->nColumn-1); ii++){ + const char *zName = 0; + int iCol = pIdx->aiColumn[ii]; + + zName = pTab->aCol[iCol].zCnName; + sqlite3_str_appendf(pStr, "%s%s", zSep, zName); + zSep = ", "; + } + zText = sqlite3_str_finish(pStr); + if( zText==0 ){ + sqlite3OomFault(pParse->db); + }else{ + *pAddrExplain = sqlite3VdbeExplain( + pParse, 0, "%s)%s", zText, (bPartial ? " WHERE " : "") + ); + sqlite3_free(zText); + } + } +} +#else +# define explainAutomaticIndex(a,b,c,d) +#endif + /* ** Generate code to construct the Index object for an automatic index ** and to set up the WhereLevel object pLevel so that the code generator @@ -160177,8 +167359,7 @@ static int termCanDriveIndex( */ static SQLITE_NOINLINE void constructAutomaticIndex( Parse *pParse, /* The parsing context */ - const WhereClause *pWC, /* The WHERE clause */ - const SrcItem *pSrc, /* The FROM clause term to get the next index */ + WhereClause *pWC, /* The WHERE clause */ const Bitmask notReady, /* Mask of cursors that are not available */ WhereLevel *pLevel /* Write new index here */ ){ @@ -160199,12 +167380,17 @@ static SQLITE_NOINLINE void constructAutomaticIndex( char *zNotUsed; /* Extra space on the end of pIdx */ Bitmask idxCols; /* Bitmap of columns used for indexing */ Bitmask extraCols; /* Bitmap of additional columns */ - u8 sentWarning = 0; /* True if a warnning has been issued */ + u8 sentWarning = 0; /* True if a warning has been issued */ + u8 useBloomFilter = 0; /* True to also add a Bloom filter */ Expr *pPartial = 0; /* Partial Index Expression */ int iContinue = 0; /* Jump here to skip excluded rows */ - SrcItem *pTabItem; /* FROM clause term being indexed */ + SrcList *pTabList; /* The complete FROM clause */ + SrcItem *pSrc; /* The FROM clause term to get the next index */ int addrCounter = 0; /* Address where integer counter is initialized */ int regBase; /* Array of registers where record is assembled */ +#ifdef SQLITE_ENABLE_STMT_SCANSTATUS + int addrExp = 0; /* Address of OP_Explain */ +#endif /* Generate code to skip over the creation and initialization of the ** transient index on 2nd and subsequent iterations of the loop.
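**
** The effect is visible from the sqlite3 shell. A join with no usable
** persistent index, e.g. (a sketch, assuming unindexed tables t1, t2):
**
**     EXPLAIN QUERY PLAN SELECT * FROM t1, t2 WHERE t1.a=t2.b;
**
** reports a line of the form
**
**     SEARCH t2 USING AUTOMATIC COVERING INDEX (b=?)
**
** and the OP_Once emitted immediately after this comment guarantees the
** transient index is built only once, on the first pass.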
*/ @@ -160215,6 +167401,8 @@ static SQLITE_NOINLINE void constructAutomaticIndex( /* Count the number of columns that will be added to the index ** and used to match WHERE clause constraints */ nKeyCol = 0; + pTabList = pWC->pWInfo->pTabList; + pSrc = &pTabList->a[pLevel->iFrom]; pTable = pSrc->pTab; pWCEnd = &pWC->a[pWC->nTerm]; pLoop = pLevel->pWLoop; @@ -160225,7 +167413,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** WHERE clause (or the ON clause of a LEFT join) that constrain which ** rows of the target table (pSrc) that can be used. */ if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pSrc) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, pLevel->iFrom) ){ pPartial = sqlite3ExprAnd(pParse, pPartial, sqlite3ExprDup(pParse->db, pExpr, 0)); @@ -160266,7 +167454,11 @@ static SQLITE_NOINLINE void constructAutomaticIndex( ** original table changes and the index and table cannot both be used ** if they go out of sync. */ - extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + if( IsView(pTable) ){ + extraCols = ALLBITS; + }else{ + extraCols = pSrc->colUsed & (~idxCols | MASKBIT(BMS-1)); + } mxBitCol = MIN(BMS-1,pTable->nCol); testcase( pTable->nCol==BMS-1 ); testcase( pTable->nCol==BMS-2 ); @@ -160302,6 +167494,16 @@ static SQLITE_NOINLINE void constructAutomaticIndex( assert( pColl!=0 || pParse->nErr>0 ); /* TH3 collate01.800 */ pIdx->azColl[n] = pColl ? pColl->zName : sqlite3StrBINARY; n++; + if( ALWAYS(pX->pLeft!=0) + && sqlite3ExprAffinity(pX->pLeft)!=SQLITE_AFF_TEXT + ){ + /* TUNING: only use a Bloom filter on an automatic index + ** if one or more key columns has the ability to hold numeric + ** values, since strings all have the same hash in the Bloom + ** filter implementation and hence a Bloom filter on a text column + ** is not usually helpful. 
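+          ** +          ** For example (an illustrative query, not from the sources): a join +          ** keyed on an INTEGER column, as in t1.a=t2.x, can be pruned by the +          ** filter, while one keyed on a TEXT column would hash every key to +          ** the same value and never prune anything.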
*/ + useBloomFilter = 1; + } } } } @@ -160328,25 +167530,27 @@ static SQLITE_NOINLINE void constructAutomaticIndex( pIdx->azColl[n] = sqlite3StrBINARY; /* Create the automatic index */ + explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp); assert( pLevel->iIdxCur>=0 ); pLevel->iIdxCur = pParse->nTab++; sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1); sqlite3VdbeSetP4KeyInfo(pParse, pIdx); VdbeComment((v, "for %s", pTable->zName)); - if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) ){ + if( OptimizationEnabled(pParse->db, SQLITE_BloomFilter) && useBloomFilter ){ + sqlite3WhereExplainBloomFilter(pParse, pWC->pWInfo, pLevel); pLevel->regFilter = ++pParse->nMem; sqlite3VdbeAddOp2(v, OP_Blob, 10000, pLevel->regFilter); } /* Fill the automatic index with content */ - pTabItem = &pWC->pWInfo->pTabList->a[pLevel->iFrom]; - if( pTabItem->fg.viaCoroutine ){ - int regYield = pTabItem->regReturn; + assert( pSrc == &pWC->pWInfo->pTabList->a[pLevel->iFrom] ); + if( pSrc->fg.viaCoroutine ){ + int regYield = pSrc->regReturn; addrCounter = sqlite3VdbeAddOp2(v, OP_Integer, 0, 0); - sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub); + sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pSrc->addrFillSub); addrTop = sqlite3VdbeAddOp1(v, OP_Yield, regYield); VdbeCoverage(v); - VdbeComment((v, "next row of %s", pTabItem->pTab->zName)); + VdbeComment((v, "next row of %s", pSrc->pTab->zName)); }else{ addrTop = sqlite3VdbeAddOp1(v, OP_Rewind, pLevel->iTabCur); VdbeCoverage(v); } @@ -160363,17 +167567,18 @@ static SQLITE_NOINLINE void constructAutomaticIndex( sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, regBase, pLoop->u.btree.nEq); } + sqlite3VdbeScanStatusCounters(v, addrExp, addrExp, sqlite3VdbeCurrentAddr(v)); sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord); sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT); if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue); - if( pTabItem->fg.viaCoroutine ){ + if( pSrc->fg.viaCoroutine ){ sqlite3VdbeChangeP2(v, addrCounter, regBase+n); testcase( pParse->db->mallocFailed ); assert( pLevel->iIdxCur>0 ); translateColumnToCopy(pParse, addrTop, pLevel->iTabCur, - pTabItem->regResult, pLevel->iIdxCur); + pSrc->regResult, pLevel->iIdxCur); sqlite3VdbeGoto(v, addrTop); - pTabItem->fg.viaCoroutine = 0; + pSrc->fg.viaCoroutine = 0; }else{ sqlite3VdbeAddOp2(v, OP_Next, pLevel->iTabCur, addrTop+1); VdbeCoverage(v); sqlite3VdbeChangeP5(v, SQLITE_STMTSTATUS_AUTOINDEX); @@ -160383,6 +167588,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex( /* Jump here when skipping the initialization */ sqlite3VdbeJumpHere(v, addrInit); + sqlite3VdbeScanStatusRange(v, addrExp, addrExp, -1); end_auto_index_create: sqlite3ExprDelete(pParse->db, pPartial); @@ -160424,16 +167630,26 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( Vdbe *v = pParse->pVdbe; /* VDBE under construction */ WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */ int iCur; /* Cursor for table getting the filter */ + IndexedExpr *saved_pIdxEpr; /* saved copy of Parse.pIdxEpr */ + IndexedExpr *saved_pIdxPartExpr; /* saved copy of Parse.pIdxPartExpr */ + + saved_pIdxEpr = pParse->pIdxEpr; + saved_pIdxPartExpr = pParse->pIdxPartExpr; + pParse->pIdxEpr = 0; + pParse->pIdxPartExpr = 0; assert( pLoop!=0 ); assert( v!=0 ); assert( pLoop->wsFlags & WHERE_BLOOMFILTER ); + assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 ); addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v); do{ + const SrcList *pTabList; const SrcItem *pItem; const 
Table *pTab; u64 sz; + int iSrc; sqlite3WhereExplainBloomFilter(pParse, pWInfo, pLevel); addrCont = sqlite3VdbeMakeLabel(pParse); iCur = pLevel->iTabCur; @@ -160447,7 +167663,9 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( ** testing complicated. By basing the blob size on the value in the ** sqlite_stat1 table, testing is much easier. */ - pItem = &pWInfo->pTabList->a[pLevel->iFrom]; + pTabList = pWInfo->pTabList; + iSrc = pLevel->iFrom; + pItem = &pTabList->a[iSrc]; assert( pItem!=0 ); pTab = pItem->pTab; assert( pTab!=0 ); @@ -160464,7 +167682,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( for(pTerm=pWInfo->sWC.a; pTerm<pWCEnd; pTerm++){ Expr *pExpr = pTerm->pExpr; if( (pTerm->wtFlags & TERM_VIRTUAL)==0 - && sqlite3ExprIsTableConstraint(pExpr, pItem) + && sqlite3ExprIsSingleTableConstraint(pExpr, pTabList, iSrc) ){ sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL); } @@ -160480,9 +167698,8 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( int r1 = sqlite3GetTempRange(pParse, n); int jj; for(jj=0; jj<n; jj++){ - int iCol = pIdx->aiColumn[jj]; assert( pIdx->pTable==pItem->pTab ); - sqlite3ExprCodeGetColumnOfTable(v, pIdx->pTable, iCur, iCol,r1+jj); + sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj); } sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n); sqlite3ReleaseTempRange(pParse, r1, n); @@ -160513,6 +167730,8 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter( } }while( iLevel < pWInfo->nLevel ); sqlite3VdbeJumpHere(v, addrOnce); + pParse->pIdxEpr = saved_pIdxEpr; + pParse->pIdxPartExpr = saved_pIdxPartExpr; } @@ -160568,22 +167787,10 @@ static sqlite3_index_info *allocateIndexInfo( assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 ); assert( pTerm->u.x.leftColumn>=XN_ROWID ); assert( pTerm->u.x.leftColumn<pTab->nCol ); - - /* tag-20191211-002: WHERE-clause constraints are not useful to the - ** right-hand table of a LEFT JOIN nor to the either table of a - ** RIGHT JOIN. See tag-20191211-001 for the - ** equivalent restriction for ordinary tables. */ - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ){ - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT ); - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_RIGHT ); - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ ); - testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) ); - testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) ); - if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) - || pTerm->pExpr->w.iJoin != pSrc->iCursor - ){ - continue; - } + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + continue; } nTerm++; pTerm->wtFlags |= TERM_OK; @@ -160780,6 +167987,9 @@ static int vtabBestIndex(Parse *pParse, Table *pTab, sqlite3_index_info *p){ sqlite3ErrorMsg(pParse, "%s", pVtab->zErrMsg); } } + if( pTab->u.vtab.p->bAllSchemas ){ + sqlite3VtabUsesAllSchemas(pParse); + } sqlite3_free(pVtab->zErrMsg); pVtab->zErrMsg = 0; return rc; } @@ -160824,6 +168034,7 @@ static int whereKeyStats( assert( pIdx->nSample>0 ); assert( pRec->nField>0 ); + /* Do a binary search to find the first sample greater than or equal ** to pRec. If pRec contains a single field, the set of samples to search ** is simply the aSample[] array. If the samples in aSample[] contain more @@ -160868,7 +168079,12 @@ static int whereKeyStats( ** it is extended to two fields. The duplicates that this creates do not ** cause any problems.
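** ** As a sketch, with nField==2 the conceptual search space is ordered as ** ** aSample[0].(f1), aSample[0].(f1,f2), aSample[1].(f1), ... ** ** giving pIdx->nSample*nField keys in all, which is what the ** initialization of iSample below reflects.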
*/ - nField = MIN(pRec->nField, pIdx->nSample); + if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){ + nField = pIdx->nKeyCol; + }else{ + nField = pIdx->nColumn; + } + nField = MIN(pRec->nField, nField); iCol = 0; iSample = pIdx->nSample * nField; do{ @@ -160934,12 +168150,12 @@ static int whereKeyStats( if( iCol>0 ){ pRec->nField = iCol; assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0 - || pParse->db->mallocFailed ); + || pParse->db->mallocFailed || CORRUPT_DB ); } if( i>0 ){ pRec->nField = nField; assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0 - || pParse->db->mallocFailed ); + || pParse->db->mallocFailed || CORRUPT_DB ); } } } @@ -160956,7 +168172,7 @@ static int whereKeyStats( ** is larger than all samples in the array. */ tRowcnt iUpper, iGap; if( i>=pIdx->nSample ){ - iUpper = sqlite3LogEstToInt(pIdx->aiRowLogEst[0]); + iUpper = pIdx->nRowEst0; }else{ iUpper = aSample[i].anLt[iCol]; } @@ -161031,7 +168247,7 @@ SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3 *db, Index *pIdx, int iCo ** Value pLoop->nOut is currently set to the estimated number of rows ** visited for scanning (a=? AND b=?). This function reduces that estimate ** by some factor to account for the (c BETWEEN ? AND ?) expression based -** on the stat4 data for the index. this scan will be peformed multiple +** on the stat4 data for the index. this scan will be performed multiple ** times (once for each (a,b) combination that matches a=?) is dealt with ** by the caller. ** @@ -161112,7 +168328,7 @@ static int whereRangeSkipScanEst( int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff)); pLoop->nOut -= nAdjust; *pbDone = 1; - WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n", + WHERETRACE(0x20, ("range skip-scan regions: %u..%u adjust=%d est=%d\n", nLower, nUpper, nAdjust*-1, pLoop->nOut)); } @@ -161283,14 +168499,15 @@ static int whereRangeScanEst( ** sample, then assume they are 4x more selective. This brings ** the estimated selectivity more in line with what it would be ** if estimated without the use of STAT4 tables. 
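** ** (In the LogEst encoding used here a value is roughly 10*log2(N), so ** subtracting 20==sqlite3LogEst(4) divides the row estimate by 4: an ** estimate of ~10 rows, LogEst 33, becomes ~2.5 rows, LogEst 13.)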
*/ - if( iLwrIdx==iUprIdx ) nNew -= 20; assert( 20==sqlite3LogEst(4) ); + if( iLwrIdx==iUprIdx ){ nNew -= 20; } + assert( 20==sqlite3LogEst(4) ); }else{ nNew = 10; assert( 10==sqlite3LogEst(2) ); } if( nNewwtFlags & TERM_VNULL)==0 ); + assert( pUpper==0 || (pUpper->wtFlags & TERM_VNULL)==0 || pParse->nErr>0 ); nNew = whereRangeAdjust(pLower, nOut); nNew = whereRangeAdjust(pUpper, nNew); @@ -161323,7 +168540,7 @@ static int whereRangeScanEst( if( nNewnOut>nOut ){ - WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n", + WHERETRACE(0x20,("Range scan lowers nOut from %d to %d\n", pLoop->nOut, nOut)); } #endif @@ -161388,7 +168605,7 @@ static int whereEqualScanEst( pBuilder->nRecValid = nEq; whereKeyStats(pParse, p, pRec, 0, a); - WHERETRACE(0x10,("equality scan regions %s(%d): %d\n", + WHERETRACE(0x20,("equality scan regions %s(%d): %d\n", p->zName, nEq-1, (int)a[1])); *pnRow = a[1]; @@ -161436,9 +168653,9 @@ static int whereInScanEst( } if( rc==SQLITE_OK ){ - if( nRowEst > nRow0 ) nRowEst = nRow0; + if( nRowEst > (tRowcnt)nRow0 ) nRowEst = nRow0; *pnRow = nRowEst; - WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst)); + WHERETRACE(0x20,("IN row estimate: est=%d\n", nRowEst)); } assert( pBuilder->nRecValid==nRecValid ); return rc; @@ -161507,17 +168724,34 @@ SQLITE_PRIVATE void sqlite3WhereClausePrint(WhereClause *pWC){ #ifdef WHERETRACE_ENABLED /* ** Print a WhereLoop object for debugging purposes -*/ -SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ - WhereInfo *pWInfo = pWC->pWInfo; - int nb = 1+(pWInfo->pTabList->nSrc+3)/4; - SrcItem *pItem = pWInfo->pTabList->a + p->iTab; - Table *pTab = pItem->pTab; - Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; - sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, - p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); - sqlite3DebugPrintf(" %12s", - pItem->zAlias ? pItem->zAlias : pTab->zName); +** +** Format example: +** +** .--- Position in WHERE clause rSetup, rRun, nOut ---. +** | | +** | .--- selfMask nTerm ------. | +** | | | | +** | | .-- prereq Idx wsFlags----. | | +** | | | Name | | | +** | | | __|__ nEq ---. ___|__ | __|__ +** | / \ / \ / \ | / \ / \ / \ +** 1.002.001 t2.t2xy 2 f 010241 N 2 cost 0,56,31 +*/ +SQLITE_PRIVATE void sqlite3WhereLoopPrint(const WhereLoop *p, const WhereClause *pWC){ + if( pWC ){ + WhereInfo *pWInfo = pWC->pWInfo; + int nb = 1+(pWInfo->pTabList->nSrc+3)/4; + SrcItem *pItem = pWInfo->pTabList->a + p->iTab; + Table *pTab = pItem->pTab; + Bitmask mAll = (((Bitmask)1)<<(nb*4)) - 1; + sqlite3DebugPrintf("%c%2d.%0*llx.%0*llx", p->cId, + p->iTab, nb, p->maskSelf, nb, p->prereq & mAll); + sqlite3DebugPrintf(" %12s", + pItem->zAlias ? 
pItem->zAlias : pTab->zName); + }else{ + sqlite3DebugPrintf("%c%2d.%03llx.%03llx %c%d", + p->cId, p->iTab, p->maskSelf, p->prereq & 0xfff, p->cId, p->iTab); + } if( (p->wsFlags & WHERE_VIRTUALTABLE)==0 ){ const char *zName; if( p->u.btree.pIndex && (zName = p->u.btree.pIndex->zName)!=0 ){ @@ -161547,13 +168781,22 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){ sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm); } sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut); - if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){ + if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){ int i; for(i=0; inLTerm; i++){ sqlite3WhereTermPrint(p->aLTerm[i], i); } } } +SQLITE_PRIVATE void sqlite3ShowWhereLoop(const WhereLoop *p){ + if( p ) sqlite3WhereLoopPrint(p, 0); +} +SQLITE_PRIVATE void sqlite3ShowWhereLoopList(const WhereLoop *p){ + while( p ){ + sqlite3ShowWhereLoop(p); + p = p->pNextLoop; + } +} #endif /* @@ -161585,12 +168828,18 @@ static void whereLoopClearUnion(sqlite3 *db, WhereLoop *p){ } /* -** Deallocate internal memory used by a WhereLoop object +** Deallocate internal memory used by a WhereLoop object. Leave the +** object in an initialized state, as if it had been newly allocated. */ static void whereLoopClear(sqlite3 *db, WhereLoop *p){ - if( p->aLTerm!=p->aLTermSpace ) sqlite3DbFreeNN(db, p->aLTerm); + if( p->aLTerm!=p->aLTermSpace ){ + sqlite3DbFreeNN(db, p->aLTerm); + p->aLTerm = p->aLTermSpace; + p->nLSlot = ArraySize(p->aLTermSpace); + } whereLoopClearUnion(db, p); - whereLoopInit(p); + p->nLTerm = 0; + p->wsFlags = 0; } /* @@ -161614,7 +168863,9 @@ static int whereLoopResize(sqlite3 *db, WhereLoop *p, int n){ */ static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){ whereLoopClearUnion(db, pTo); - if( whereLoopResize(db, pTo, pFrom->nLTerm) ){ + if( pFrom->nLTerm > pTo->nLSlot + && whereLoopResize(db, pTo, pFrom->nLTerm) + ){ memset(pTo, 0, WHERE_LOOP_XFER_SZ); return SQLITE_NOMEM_BKPT; } @@ -161632,8 +168883,9 @@ static int whereLoopXfer(sqlite3 *db, WhereLoop *pTo, WhereLoop *pFrom){ ** Delete a WhereLoop object */ static void whereLoopDelete(sqlite3 *db, WhereLoop *p){ + assert( db!=0 ); whereLoopClear(db, p); - sqlite3DbFreeNN(db, p); + sqlite3DbNNFreeNN(db, p); } /* @@ -161641,73 +168893,76 @@ static void whereLoopDelete(sqlite3 *db, WhereLoop *p){ */ static void whereInfoFree(sqlite3 *db, WhereInfo *pWInfo){ assert( pWInfo!=0 ); + assert( db!=0 ); sqlite3WhereClauseClear(&pWInfo->sWC); while( pWInfo->pLoops ){ WhereLoop *p = pWInfo->pLoops; pWInfo->pLoops = p->pNextLoop; whereLoopDelete(db, p); } - assert( pWInfo->pExprMods==0 ); while( pWInfo->pMemToFree ){ WhereMemBlock *pNext = pWInfo->pMemToFree->pNext; - sqlite3DbFreeNN(db, pWInfo->pMemToFree); + sqlite3DbNNFreeNN(db, pWInfo->pMemToFree); pWInfo->pMemToFree = pNext; } - sqlite3DbFreeNN(db, pWInfo); -} - -/* Undo all Expr node modifications -*/ -static void whereUndoExprMods(WhereInfo *pWInfo){ - while( pWInfo->pExprMods ){ - WhereExprMod *p = pWInfo->pExprMods; - pWInfo->pExprMods = p->pNext; - memcpy(p->pExpr, &p->orig, sizeof(p->orig)); - sqlite3DbFree(pWInfo->pParse->db, p); - } + sqlite3DbNNFreeNN(db, pWInfo); } /* -** Return TRUE if all of the following are true: +** Return TRUE if X is a proper subset of Y but is of equal or less cost. 
+** In other words, return true if all constraints of X are also part of Y +** and Y has additional constraints that X lacks and that might speed the +** search, but the cost of running X is not more than the cost of running Y. +** +** In other words, return true if the cost relationship between X and Y +** is inverted and needs to be adjusted. +** +** Case 1: ** -** (1) X has the same or lower cost, or returns the same or fewer rows, -** than Y. -** (2) X uses fewer WHERE clause terms than Y -** (3) Every WHERE clause term used by X is also used by Y -** (4) X skips at least as many columns as Y -** (5) If X is a covering index, than Y is too +** (1a) X and Y use the same index. +** (1b) X has fewer == terms than Y +** (1c) Neither X nor Y use skip-scan +** (1d) X does not have a greater cost than Y ** -** Conditions (2) and (3) mean that X is a "proper subset" of Y. -** If X is a proper subset of Y then Y is a better choice and ought -** to have a lower cost. This routine returns TRUE when that cost -** relationship is inverted and needs to be adjusted. Constraint (4) -** was added because if X uses skip-scan less than Y it still might -** deserve a lower cost even if it is a proper subset of Y. Constraint (5) -** was added because a covering index probably deserves to have a lower cost -** than a non-covering index even if it is a proper subset. +** Case 2: +** +** (2a) X has the same or lower cost, or returns the same or fewer rows, +** than Y. +** (2b) X uses fewer WHERE clause terms than Y +** (2c) Every WHERE clause term used by X is also used by Y +** (2d) X skips at least as many columns as Y +** (2e) If X is a covering index, then Y is too */ static int whereLoopCheaperProperSubset( const WhereLoop *pX, /* First WhereLoop to compare */ const WhereLoop *pY /* Compare against this WhereLoop */ ){ int i, j; + if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; /* (1d) and (2a) */ + assert( (pX->wsFlags & WHERE_VIRTUALTABLE)==0 ); + assert( (pY->wsFlags & WHERE_VIRTUALTABLE)==0 ); + if( pX->u.btree.nEq < pY->u.btree.nEq /* (1b) */ + && pX->u.btree.pIndex==pY->u.btree.pIndex /* (1a) */ + && pX->nSkip==0 && pY->nSkip==0 /* (1c) */ + ){ + return 1; /* Case 1 is true */ + } if( pX->nLTerm-pX->nSkip >= pY->nLTerm-pY->nSkip ){ - return 0; /* X is not a subset of Y */ + return 0; /* (2b) */ } - if( pX->rRun>pY->rRun && pX->nOut>pY->nOut ) return 0; - if( pY->nSkip > pX->nSkip ) return 0; + if( pY->nSkip > pX->nSkip ) return 0; /* (2d) */ for(i=pX->nLTerm-1; i>=0; i--){ if( pX->aLTerm[i]==0 ) continue; for(j=pY->nLTerm-1; j>=0; j--){ if( pY->aLTerm[j]==pX->aLTerm[i] ) break; } - if( j<0 ) return 0; /* X not a subset of Y since term X[i] not used by Y */ + if( j<0 ) return 0; /* (2c) */ } if( (pX->wsFlags&WHERE_IDX_ONLY)!=0 && (pY->wsFlags&WHERE_IDX_ONLY)==0 ){ - return 0; /* Constraint (5) */ + return 0; /* (2e) */ } - return 1; /* All conditions meet */ + return 1; /* Case 2 is true */ } /* @@ -161788,7 +169043,7 @@ static WhereLoop **whereLoopFindLesser( ** rSetup. Call this SETUP-INVARIANT */ assert( p->rSetup>=pTemplate->rSetup ); - /* Any loop using an appliation-defined index (or PRIMARY KEY or + /* Any loop using an application-defined index (or PRIMARY KEY or ** UNIQUE constraint) with one or more == constraints is better ** than an automatic index. Unless it is a skip-scan. */ if( (p->wsFlags & WHERE_AUTO_INDEX)!=0 @@ -161815,7 +169070,7 @@ static WhereLoop **whereLoopFindLesser( /* If pTemplate is always better than p, then cause p to be overwritten ** with pTemplate.
pTemplate is better than p if: - ** (1) pTemplate has no more dependences than p, and + ** (1) pTemplate has no more dependencies than p, and ** (2) pTemplate has an equal or lower cost than p. */ if( (p->prereq & pTemplate->prereq)==pTemplate->prereq /* (1) */ @@ -161933,7 +169188,7 @@ static int whereLoopInsert(WhereLoopBuilder *pBuilder, WhereLoop *pTemplate){ }else{ /* We will be overwriting WhereLoop p[]. But before we do, first ** go through the rest of the list and delete any other entries besides - ** p[] that are also supplated by pTemplate */ + ** p[] that are also supplanted by pTemplate */ WhereLoop **ppTail = &p->pNextLoop; WhereLoop *pToDel; while( *ppTail ){ @@ -162013,6 +169268,7 @@ static void whereLoopOutputAdjust( if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break; } if( j<0 ){ + sqlite3ProgressCheck(pWC->pWInfo->pParse); if( pLoop->maskSelf==pTerm->prereqAll ){ /* If there are extra terms in the WHERE clause not used by an index ** that depend only on the table being scanned, and that will tend to @@ -162132,7 +169388,7 @@ static int whereRangeVectorLen( } /* -** Adjust the cost C by the costMult facter T. This only occurs if +** Adjust the cost C by the costMult factor T. This only occurs if ** compiled with -DSQLITE_ENABLE_COSTMULT */ #ifdef SQLITE_ENABLE_COSTMULT @@ -162159,7 +169415,7 @@ static int whereLoopAddBtreeIndex( Index *pProbe, /* An index on pSrc */ LogEst nInMul /* log(Number of iterations due to IN) */ ){ - WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyse context */ + WhereInfo *pWInfo = pBuilder->pWInfo; /* WHERE analyze context */ Parse *pParse = pWInfo->pParse; /* Parsing context */ sqlite3 *db = pParse->db; /* Database connection malloc context */ WhereLoop *pNew; /* Template WhereLoop under construction */ @@ -162180,7 +169436,10 @@ static int whereLoopAddBtreeIndex( WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */ pNew = pBuilder->pNew; - if( db->mallocFailed ) return SQLITE_NOMEM_BKPT; + assert( db->mallocFailed==0 || pParse->nErr>0 ); + if( pParse->nErr ){ + return pParse->rc; + } WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d, rRun=%d\n", pProbe->pTable->zName,pProbe->zName, pNew->u.btree.nEq, pNew->nSkip, pNew->rRun)); @@ -162193,7 +169452,10 @@ static int whereLoopAddBtreeIndex( assert( pNew->u.btree.nBtm==0 ); opMask = WO_EQ|WO_IN|WO_GT|WO_GE|WO_LT|WO_LE|WO_ISNULL|WO_IS; } - if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bUnordered || pProbe->bLowQual ){ + if( pProbe->bUnordered ) opMask &= ~(WO_GT|WO_GE|WO_LT|WO_LE); + if( pProbe->bLowQual ) opMask &= ~(WO_EQ|WO_IN|WO_IS); + } assert( pNew->u.btree.nEqnColumn ); assert( pNew->u.btree.nEqnKeyCol @@ -162231,32 +169493,11 @@ static int whereLoopAddBtreeIndex( ** to mix with a lower range bound from some other source */ if( pTerm->wtFlags & TERM_LIKEOPT && pTerm->eOperator==WO_LT ) continue; - /* tag-20191211-001: Do not allow constraints from the WHERE clause to - ** be used by the right table of a LEFT JOIN nor by the left table of a - ** RIGHT JOIN. Only constraints in the ON clause are allowed. - ** See tag-20191211-002 for the vtab equivalent. - ** - ** 2022-06-06: See https://sqlite.org/forum/forumpost/206d99a16dd9212f - ** for an example of a WHERE clause constraints that may not be used on - ** the right table of a RIGHT JOIN because the constraint implies a - ** not-NULL condition on the left table of the RIGHT JOIN. - ** - ** 2022-06-10: The same condition applies to termCanDriveIndex() above. 
- ** https://sqlite.org/forum/forumpost/51e6959f61 - */ - if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 ){ - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LEFT ); - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_RIGHT ); - testcase( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))==JT_LTORJ ); - testcase( ExprHasProperty(pTerm->pExpr, EP_OuterON) ) - testcase( ExprHasProperty(pTerm->pExpr, EP_InnerON) ); - if( !ExprHasProperty(pTerm->pExpr, EP_OuterON|EP_InnerON) - || pTerm->pExpr->w.iJoin != pSrc->iCursor - ){ - continue; - } + if( (pSrc->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0 + && !constraintCompatibleWithOuterJoin(pTerm,pSrc) + ){ + continue; } - if( IsUniqueIndex(pProbe) && saved_nEq==pProbe->nKeyCol-1 ){ pBuilder->bldFlags1 |= SQLITE_BLDF1_UNIQUE; }else{ @@ -162267,7 +169508,11 @@ static int whereLoopAddBtreeIndex( pNew->u.btree.nBtm = saved_nBtm; pNew->u.btree.nTop = saved_nTop; pNew->nLTerm = saved_nLTerm; - if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ + if( pNew->nLTerm>=pNew->nLSlot + && whereLoopResize(db, pNew, pNew->nLTerm+1) + ){ + break; /* OOM while trying to enlarge the pNew->aLTerm array */ + } pNew->aLTerm[pNew->nLTerm++] = pTerm; pNew->prereq = (saved_prereq | pTerm->prereqRight) & ~pNew->maskSelf; @@ -162360,38 +169605,39 @@ static int whereLoopAddBtreeIndex( if( scan.iEquiv>1 ) pNew->wsFlags |= WHERE_TRANSCONS; }else if( eOp & WO_ISNULL ){ pNew->wsFlags |= WHERE_COLUMN_NULL; - }else if( eOp & (WO_GT|WO_GE) ){ - testcase( eOp & WO_GT ); - testcase( eOp & WO_GE ); - pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT; - pNew->u.btree.nBtm = whereRangeVectorLen( - pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm - ); - pBtm = pTerm; - pTop = 0; - if( pTerm->wtFlags & TERM_LIKEOPT ){ - /* Range constraints that come from the LIKE optimization are - ** always used in pairs. */ - pTop = &pTerm[1]; - assert( (pTop-(pTerm->pWC->a))pWC->nTerm ); - assert( pTop->wtFlags & TERM_LIKEOPT ); - assert( pTop->eOperator==WO_LT ); - if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ - pNew->aLTerm[pNew->nLTerm++] = pTop; - pNew->wsFlags |= WHERE_TOP_LIMIT; - pNew->u.btree.nTop = 1; - } - }else{ - assert( eOp & (WO_LT|WO_LE) ); - testcase( eOp & WO_LT ); - testcase( eOp & WO_LE ); - pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT; - pNew->u.btree.nTop = whereRangeVectorLen( + }else{ + int nVecLen = whereRangeVectorLen( pParse, pSrc->iCursor, pProbe, saved_nEq, pTerm ); - pTop = pTerm; - pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ? - pNew->aLTerm[pNew->nLTerm-2] : 0; + if( eOp & (WO_GT|WO_GE) ){ + testcase( eOp & WO_GT ); + testcase( eOp & WO_GE ); + pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_BTM_LIMIT; + pNew->u.btree.nBtm = nVecLen; + pBtm = pTerm; + pTop = 0; + if( pTerm->wtFlags & TERM_LIKEOPT ){ + /* Range constraints that come from the LIKE optimization are + ** always used in pairs. */ + pTop = &pTerm[1]; + assert( (pTop-(pTerm->pWC->a))pWC->nTerm ); + assert( pTop->wtFlags & TERM_LIKEOPT ); + assert( pTop->eOperator==WO_LT ); + if( whereLoopResize(db, pNew, pNew->nLTerm+1) ) break; /* OOM */ + pNew->aLTerm[pNew->nLTerm++] = pTop; + pNew->wsFlags |= WHERE_TOP_LIMIT; + pNew->u.btree.nTop = 1; + } + }else{ + assert( eOp & (WO_LT|WO_LE) ); + testcase( eOp & WO_LT ); + testcase( eOp & WO_LE ); + pNew->wsFlags |= WHERE_COLUMN_RANGE|WHERE_TOP_LIMIT; + pNew->u.btree.nTop = nVecLen; + pTop = pTerm; + pBtm = (pNew->wsFlags & WHERE_BTM_LIMIT)!=0 ? 
+ pNew->aLTerm[pNew->nLTerm-2] : 0; + } } /* At this point pNew->nOut is set to the number of rows expected to @@ -162443,7 +169689,7 @@ static int whereLoopAddBtreeIndex( && pNew->nOut+10 > pProbe->aiRowLogEst[0] ){ #if WHERETRACE_ENABLED /* 0x01 */ - if( sqlite3WhereTrace & 0x01 ){ + if( sqlite3WhereTrace & 0x20 ){ sqlite3DebugPrintf( "STAT4 determines term has low selectivity:\n"); sqlite3WhereTermPrint(pTerm, 999); @@ -162480,9 +169726,17 @@ static int whereLoopAddBtreeIndex( ** seek only. Then, if this is a non-covering index, add the cost of ** visiting the rows in the main table. */ assert( pSrc->pTab->szTabRow>0 ); - rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){ + /* The pProbe->szIdxRow is low for an IPK table since the interior + ** pages are small. Thus szIdxRow gives a good estimate of seek cost. + ** But the leaf pages are full-size, so pProbe->szIdxRow would badly + ** under-estimate the scanning cost. */ + rCostIdx = pNew->nOut + 16; + }else{ + rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow; + } pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx); - if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){ + if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){ pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16); } ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult); @@ -162504,6 +169758,9 @@ static int whereLoopAddBtreeIndex( && (pNew->u.btree.nEq<pProbe->nKeyCol || pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY) ){ + if( pNew->u.btree.nEq>3 ){ + sqlite3ProgressCheck(pParse); + } whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn); } pNew->nOut = saved_nOut; @@ -162635,6 +169892,243 @@ static int whereUsablePartialIndex( return 0; } +/* +** pIdx is an index containing expressions. Check it to see if any of the +** expressions in the index match the pExpr expression. +*/ +static int exprIsCoveredByIndex( + const Expr *pExpr, + const Index *pIdx, + int iTabCur +){ + int i; + for(i=0; i<pIdx->nColumn; i++){ + if( pIdx->aiColumn[i]==XN_EXPR + && sqlite3ExprCompare(0, pExpr, pIdx->aColExpr->a[i].pExpr, iTabCur)==0 + ){ + return 1; + } + } + return 0; +} + +/* +** Structure passed to the whereIsCoveringIndex Walker callback. +*/ +typedef struct CoveringIndexCheck CoveringIndexCheck; +struct CoveringIndexCheck { + Index *pIdx; /* The index */ + int iTabCur; /* Cursor number for the corresponding table */ + u8 bExpr; /* Uses an indexed expression */ + u8 bUnidx; /* Uses an unindexed column not within an indexed expr */ +}; + +/* +** Information passed in is pWalk->u.pCovIdxCk. Call it pCk. +** +** If the Expr node references the table with cursor pCk->iTabCur, then +** make sure that column is covered by the index pCk->pIdx. We know that +** all columns less than 63 (really BMS-1) are covered, so we don't need +** to check them. But we do need to check any column at 63 or greater. +** +** If the index does not cover the column, then set pWalk->eCode to +** non-zero and return WRC_Abort to stop the search. +** +** If this node does not disprove that the index can be a covering index, +** then just return WRC_Continue, to continue the search. +** +** If pCk->pIdx contains indexed expressions and one of those expressions +** matches pExpr, then prune the search.
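+** +** As a sketch of why the walk is needed, consider a hypothetical +** +** CREATE TABLE t1(c0,c1,...,c99); +** CREATE INDEX i1 ON t1(c70,c80); +** +** The colUsed bitmask lumps every column past 62 into a single bit, so +** only a walk of the statement can tell whether a reference to a +** high-numbered column is to c70 or c80 (covered) or to another column.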
+*/ +static int whereIsCoveringIndexWalkCallback(Walker *pWalk, Expr *pExpr){ + int i; /* Loop counter */ + const Index *pIdx; /* The index of interest */ + const i16 *aiColumn; /* Columns contained in the index */ + u16 nColumn; /* Number of columns in the index */ + CoveringIndexCheck *pCk; /* Info about this search */ + + pCk = pWalk->u.pCovIdxCk; + pIdx = pCk->pIdx; + if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN) ){ + /* if( pExpr->iColumn<(BMS-1) && pIdx->bHasExpr==0 ) return WRC_Continue;*/ + if( pExpr->iTable!=pCk->iTabCur ) return WRC_Continue; + pIdx = pWalk->u.pCovIdxCk->pIdx; + aiColumn = pIdx->aiColumn; + nColumn = pIdx->nColumn; + for(i=0; iiColumn ) return WRC_Continue; + } + pCk->bUnidx = 1; + return WRC_Abort; + }else if( pIdx->bHasExpr + && exprIsCoveredByIndex(pExpr, pIdx, pWalk->u.pCovIdxCk->iTabCur) ){ + pCk->bExpr = 1; + return WRC_Prune; + } + return WRC_Continue; +} + + +/* +** pIdx is an index that covers all of the low-number columns used by +** pWInfo->pSelect (columns from 0 through 62) or an index that has +** expressions terms. Hence, we cannot determine whether or not it is +** a covering index by using the colUsed bitmasks. We have to do a search +** to see if the index is covering. This routine does that search. +** +** The return value is one of these: +** +** 0 The index is definitely not a covering index +** +** WHERE_IDX_ONLY The index is definitely a covering index +** +** WHERE_EXPRIDX The index is likely a covering index, but it is +** difficult to determine precisely because of the +** expressions that are indexed. Score it as a +** covering index, but still keep the main table open +** just in case we need it. +** +** This routine is an optimization. It is always safe to return zero. +** But returning one of the other two values when zero should have been +** returned can lead to incorrect bytecode and assertion faults. +*/ +static SQLITE_NOINLINE u32 whereIsCoveringIndex( + WhereInfo *pWInfo, /* The WHERE clause context */ + Index *pIdx, /* Index that is being tested */ + int iTabCur /* Cursor for the table being indexed */ +){ + int i, rc; + struct CoveringIndexCheck ck; + Walker w; + if( pWInfo->pSelect==0 ){ + /* We don't have access to the full query, so we cannot check to see + ** if pIdx is covering. Assume it is not. */ + return 0; + } + if( pIdx->bHasExpr==0 ){ + for(i=0; inColumn; i++){ + if( pIdx->aiColumn[i]>=BMS-1 ) break; + } + if( i>=pIdx->nColumn ){ + /* pIdx does not index any columns greater than 62, but we know from + ** colMask that columns greater than 62 are used, so this is not a + ** covering index */ + return 0; + } + } + ck.pIdx = pIdx; + ck.iTabCur = iTabCur; + ck.bExpr = 0; + ck.bUnidx = 0; + memset(&w, 0, sizeof(w)); + w.xExprCallback = whereIsCoveringIndexWalkCallback; + w.xSelectCallback = sqlite3SelectWalkNoop; + w.u.pCovIdxCk = &ck; + sqlite3WalkSelect(&w, pWInfo->pSelect); + if( ck.bUnidx ){ + rc = 0; + }else if( ck.bExpr ){ + rc = WHERE_EXPRIDX; + }else{ + rc = WHERE_IDX_ONLY; + } + return rc; +} + +/* +** This is an sqlite3ParserAddCleanup() callback that is invoked to +** free the Parse->pIdxEpr list when the Parse object is destroyed. +*/ +static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){ + IndexedExpr **pp = (IndexedExpr**)pObject; + while( *pp!=0 ){ + IndexedExpr *p = *pp; + *pp = p->pIENext; + sqlite3ExprDelete(db, p->pExpr); + sqlite3DbFreeNN(db, p); + } +} + +/* +** This function is called for a partial index - one with a WHERE clause - in +** two scenarios. 
In both cases, it determines whether or not the WHERE +** clause on the index implies that a column of the table may be safely +** replaced by a constant expression. For example, in the following +** SELECT: +** +** CREATE INDEX i1 ON t1(b, c) WHERE a=<expr>; +** SELECT a, b, c FROM t1 WHERE a=<expr> AND b=?; +** +** The "a" in the select-list may be replaced by <expr>, iff: +** +** (a) <expr> is a constant expression, and +** (b) The (a=<expr>) comparison uses the BINARY collation sequence, and +** (c) Column "a" has an affinity other than NONE or BLOB. +** +** If argument pItem is NULL, then pMask must not be NULL. In this case this +** function is being called as part of determining whether or not pIdx +** is a covering index. This function clears any bits in (*pMask) +** corresponding to columns that may be replaced by constants as described +** above. +** +** Otherwise, if pItem is not NULL, then this function is being called +** as part of coding a loop that uses index pIdx. In this case, add entries +** to the Parse.pIdxPartExpr list for each column that can be replaced +** by a constant. +*/ +static void wherePartIdxExpr( + Parse *pParse, /* Parse context */ + Index *pIdx, /* Partial index being processed */ + Expr *pPart, /* WHERE clause being processed */ + Bitmask *pMask, /* Mask to clear bits in */ + int iIdxCur, /* Cursor number for index */ + SrcItem *pItem /* The FROM clause entry for the table */ +){ + assert( pItem==0 || (pItem->fg.jointype & JT_RIGHT)==0 ); + assert( (pItem==0 || pMask==0) && (pMask!=0 || pItem!=0) ); + + if( pPart->op==TK_AND ){ + wherePartIdxExpr(pParse, pIdx, pPart->pRight, pMask, iIdxCur, pItem); + pPart = pPart->pLeft; + } + + if( (pPart->op==TK_EQ || pPart->op==TK_IS) ){ + Expr *pLeft = pPart->pLeft; + Expr *pRight = pPart->pRight; + u8 aff; + + if( pLeft->op!=TK_COLUMN ) return; + if( !sqlite3ExprIsConstant(pRight) ) return; + if( !sqlite3IsBinary(sqlite3ExprCompareCollSeq(pParse, pPart)) ) return; + if( pLeft->iColumn<0 ) return; + aff = pIdx->pTable->aCol[pLeft->iColumn].affinity; + if( aff>=SQLITE_AFF_TEXT ){ + if( pItem ){ + sqlite3 *db = pParse->db; + IndexedExpr *p = (IndexedExpr*)sqlite3DbMallocRaw(db, sizeof(*p)); + if( p ){ + int bNullRow = (pItem->fg.jointype&(JT_LEFT|JT_LTORJ))!=0; + p->pExpr = sqlite3ExprDup(db, pRight, 0); + p->iDataCur = pItem->iCursor; + p->iIdxCur = iIdxCur; + p->iIdxCol = pLeft->iColumn; + p->bMaybeNullRow = bNullRow; + p->pIENext = pParse->pIdxPartExpr; + p->aff = aff; + pParse->pIdxPartExpr = p; + if( p->pIENext==0 ){ + void *pArg = (void*)&pParse->pIdxPartExpr; + sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg); + } + } + }else if( pLeft->iColumn<(BMS-1) ){ + *pMask &= ~((Bitmask)1 << pLeft->iColumn); + } + } + } +} + + /* ** Add all WhereLoop objects for a single table of the join where the table ** is identified by pBuilder->pNew->iTab.
That table is guaranteed to be @@ -162673,7 +170167,7 @@ static int whereUsablePartialIndex( */ static int whereLoopAddBtree( WhereLoopBuilder *pBuilder, /* WHERE clause information */ - Bitmask mPrereq /* Extra prerequesites for using this table */ + Bitmask mPrereq /* Extra prerequisites for using this table */ ){ WhereInfo *pWInfo; /* WHERE analysis context */ Index *pProbe; /* An index we are evaluating */ @@ -162717,7 +170211,7 @@ static int whereLoopAddBtree( sPk.aiRowLogEst = aiRowEstPk; sPk.onError = OE_Replace; sPk.pTable = pTab; - sPk.szIdxRow = pTab->szTabRow; + sPk.szIdxRow = 3; /* TUNING: Interior rows of IPK table are very small */ sPk.idxType = SQLITE_IDXTYPE_IPK; aiRowEstPk[0] = pTab->nRowLogEst; aiRowEstPk[1] = 0; @@ -162768,7 +170262,8 @@ static int whereLoopAddBtree( if( !IsView(pTab) && (pTab->tabFlags & TF_Ephemeral)==0 ){ pNew->rSetup += 28; }else{ - pNew->rSetup -= 10; + pNew->rSetup -= 25; /* Greatly reduced setup cost for auto indexes + ** on ephemeral materializations of views */ } ApplyCostMultiplier(pNew->rSetup, pTab->costMult); if( pNew->rSetup<0 ) pNew->rSetup = 0; @@ -162845,11 +170340,43 @@ static int whereLoopAddBtree( }else{ Bitmask m; if( pProbe->isCovering ){ - pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; m = 0; + pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; }else{ m = pSrc->colUsed & pProbe->colNotIdxed; - pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED; + if( pProbe->pPartIdxWhere ){ + wherePartIdxExpr( + pWInfo->pParse, pProbe, pProbe->pPartIdxWhere, &m, 0, 0 + ); + } + pNew->wsFlags = WHERE_INDEXED; + if( m==TOPBIT || (pProbe->bHasExpr && !pProbe->bHasVCol && m!=0) ){ + u32 isCov = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor); + if( isCov==0 ){ + WHERETRACE(0x200, + ("-> %s is not a covering index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + assert( m!=0 ); + }else{ + m = 0; + pNew->wsFlags |= isCov; + if( isCov & WHERE_IDX_ONLY ){ + WHERETRACE(0x200, + ("-> %s is a covering expression index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + }else{ + assert( isCov==WHERE_EXPRIDX ); + WHERETRACE(0x200, + ("-> %s might be a covering expression index" + " according to whereIsCoveringIndex()\n", pProbe->zName)); + } + } + }else if( m==0 ){ + WHERETRACE(0x200, + ("-> %s a covering index according to bitmasks\n", + pProbe->zName, m==0 ? "is" : "is not")); + pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED; + } } /* Full scan via index */ @@ -163022,7 +170549,7 @@ static int whereLoopAddVirtualOne( ** that the particular combination of parameters provided is unusable. ** Make no entries in the loop table. */ - WHERETRACE(0xffff, (" ^^^^--- non-viable plan rejected!\n")); + WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n")); return SQLITE_OK; } return rc; @@ -163133,7 +170660,7 @@ static int whereLoopAddVirtualOne( sqlite3_free(pNew->u.vtab.idxStr); pNew->u.vtab.needFree = 0; } - WHERETRACE(0xffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n", + WHERETRACE(0xffffffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n", *pbIn, (sqlite3_uint64)mPrereq, (sqlite3_uint64)(pNew->prereq & ~mPrereq))); @@ -163149,7 +170676,7 @@ static int whereLoopAddVirtualOne( ** ** Return a pointer to the collation name: ** -** 1. If there is an explicit COLLATE operator on the constaint, return it. +** 1. If there is an explicit COLLATE operator on the constraint, return it. ** ** 2. Else, if the column has an alternative collation, return that. 
** @@ -163204,7 +170731,7 @@ SQLITE_API int sqlite3_vtab_rhs_value( sqlite3_value *pVal = 0; int rc = SQLITE_OK; if( iCons<0 || iCons>=pIdxInfo->nConstraint ){ - rc = SQLITE_MISUSE; /* EV: R-30545-25046 */ + rc = SQLITE_MISUSE_BKPT; /* EV: R-30545-25046 */ }else{ if( pH->aRhs[iCons]==0 ){ WhereTerm *pTerm = &pH->pWC->a[pIdxInfo->aConstraint[iCons].iTermOffset]; @@ -163234,32 +170761,27 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){ return pHidden->eDistinct; } -#if (defined(SQLITE_ENABLE_DBPAGE_VTAB) || defined(SQLITE_TEST)) \ - && !defined(SQLITE_OMIT_VIRTUALTABLE) /* ** Cause the prepared statement that is associated with a call to -** xBestIndex to potentiall use all schemas. If the statement being +** xBestIndex to potentially use all schemas. If the statement being ** prepared is read-only, then just start read transactions on all ** schemas. But if this is a write operation, start writes on all ** schemas. ** ** This is used by the (built-in) sqlite_dbpage virtual table. */ -SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info *pIdxInfo){ - HiddenIndexInfo *pHidden = (HiddenIndexInfo*)&pIdxInfo[1]; - Parse *pParse = pHidden->pParse; +SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(Parse *pParse){ int nDb = pParse->db->nDb; int i; for(i=0; iwriteMask ){ + if( DbMaskNonZero(pParse->writeMask) ){ for(i=0; ipTab->zName)); - WHERETRACE(0x40, (" VirtualOne: all usable\n")); + WHERETRACE(0x800, (" VirtualOne: all usable\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry ); @@ -163350,7 +170872,7 @@ static int whereLoopAddVirtual( /* If the plan produced by the earlier call uses an IN(...) term, call ** xBestIndex again, this time with IN(...) terms disabled. */ if( bIn ){ - WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n")); + WHERETRACE(0x800, (" VirtualOne: all usable w/o IN\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn, 0); assert( bIn==0 ); @@ -163376,7 +170898,7 @@ static int whereLoopAddVirtual( mPrev = mNext; if( mNext==ALLBITS ) break; if( mNext==mBest || mNext==mBestNoIn ) continue; - WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", + WHERETRACE(0x800, (" VirtualOne: mPrev=%04llx mNext=%04llx\n", (sqlite3_uint64)mPrev, (sqlite3_uint64)mNext)); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn, 0); @@ -163390,7 +170912,7 @@ static int whereLoopAddVirtual( ** that requires no source tables at all (i.e. one guaranteed to be ** usable), make a call here with all source tables disabled */ if( rc==SQLITE_OK && seenZero==0 ){ - WHERETRACE(0x40, (" VirtualOne: all disabled\n")); + WHERETRACE(0x800, (" VirtualOne: all disabled\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn, 0); if( bIn==0 ) seenZeroNoIN = 1; @@ -163400,7 +170922,7 @@ static int whereLoopAddVirtual( ** that requires no source tables at all and does not use an IN(...) ** operator, make a final call to obtain one here. 
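** ** In summary, xBestIndex is probed with (1) all constraints usable, ** (2) all usable but IN(...) terms disabled, (3) one call for each ** distinct prerequisite mask seen in earlier solutions, and (4) all ** constraints disabled, with and without IN(...), so that the solver ** always has at least one plan that depends on no other table.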
*/ if( rc==SQLITE_OK && seenZeroNoIN==0 ){ - WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n")); + WHERETRACE(0x800, (" VirtualOne: all disabled and w/o IN\n")); rc = whereLoopAddVirtualOne( pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn, 0); } @@ -163456,7 +170978,7 @@ static int whereLoopAddOr( sSubBuild = *pBuilder; sSubBuild.pOrSet = &sCur; - WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm)); + WHERETRACE(0x400, ("Begin processing OR-clause %p\n", pTerm)); for(pOrTerm=pOrWC->a; pOrTermeOperator & WO_AND)!=0 ){ sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc; @@ -163473,9 +170995,9 @@ static int whereLoopAddOr( } sCur.n = 0; #ifdef WHERETRACE_ENABLED - WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n", + WHERETRACE(0x400, ("OR-term %d of %p has %d subterms:\n", (int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm)); - if( sqlite3WhereTrace & 0x400 ){ + if( sqlite3WhereTrace & 0x20000 ){ sqlite3WhereClausePrint(sSubBuild.pWC); } #endif @@ -163490,8 +171012,6 @@ static int whereLoopAddOr( if( rc==SQLITE_OK ){ rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable); } - assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0 - || rc==SQLITE_NOMEM ); testcase( rc==SQLITE_NOMEM && sCur.n>0 ); testcase( rc==SQLITE_DONE ); if( sCur.n==0 ){ @@ -163537,7 +171057,7 @@ static int whereLoopAddOr( pNew->prereq = sSum.a[i].prereq; rc = whereLoopInsert(pBuilder, pNew); } - WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm)); + WHERETRACE(0x400, ("End processing OR-clause %p\n", pTerm)); } } return rc; @@ -163563,7 +171083,13 @@ static int whereLoopAddAll(WhereLoopBuilder *pBuilder){ /* Loop over the tables in the join, from left to right */ pNew = pBuilder->pNew; - whereLoopInit(pNew); + + /* Verify that pNew has already been initialized */ + assert( pNew->nLTerm==0 ); + assert( pNew->wsFlags==0 ); + assert( pNew->nLSlot>=ArraySize(pNew->aLTermSpace) ); + assert( pNew->aLTerm!=0 ); + pBuilder->iPlanLimit = SQLITE_QUERY_PLANNER_LIMIT; for(iTab=0, pItem=pTabList->a; pItemiTable!=iCur ) continue; if( pOBExpr->iColumn!=iColumn ) continue; }else{ - Expr *pIdxExpr = pIndex->aColExpr->a[j].pExpr; - if( sqlite3ExprCompareSkip(pOBExpr, pIdxExpr, iCur) ){ + Expr *pIxExpr = pIndex->aColExpr->a[j].pExpr; + if( sqlite3ExprCompareSkip(pOBExpr, pIxExpr, iCur) ){ continue; } } @@ -164012,37 +171538,56 @@ static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){ ** order. */ static LogEst whereSortingCost( - WhereInfo *pWInfo, - LogEst nRow, - int nOrderBy, - int nSorted + WhereInfo *pWInfo, /* Query planning context */ + LogEst nRow, /* Estimated number of rows to sort */ + int nOrderBy, /* Number of ORDER BY clause terms */ + int nSorted /* Number of initial ORDER BY terms naturally in order */ ){ - /* TUNING: Estimated cost of a full external sort, where N is + /* Estimated cost of a full external sort, where N is ** the number of rows to sort is: ** - ** cost = (3.0 * N * log(N)). + ** cost = (K * N * log(N)). ** ** Or, if the order-by clause has X terms but only the last Y ** terms are out of order, then block-sorting will reduce the ** sorting cost to: ** - ** cost = (3.0 * N * log(N)) * (Y/X) + ** cost = (K * N * log(N)) * (Y/X) + ** + ** The constant K is at least 2.0 but will be larger if there are a + ** large number of columns to be sorted, as the sorting time is + ** proportional to the amount of content to be sorted. The algorithm + ** does not currently distinguish between fat columns (BLOBs and TEXTs) + ** and skinny columns (INTs). 
It just uses the number of columns as + ** an approximation for the row width. ** - ** The (Y/X) term is implemented using stack variable rScale - ** below. + ** An extra factor of 2.0 or 3.0 is added to the sorting cost if the sort + ** is built using OP_IdxInsert and OP_Sort rather than with OP_SorterInsert. */ - LogEst rScale, rSortCost; - assert( nOrderBy>0 && 66==sqlite3LogEst(100) ); - rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; - rSortCost = nRow + rScale + 16; + LogEst rSortCost, nCol; + assert( pWInfo->pSelect!=0 ); + assert( pWInfo->pSelect->pEList!=0 ); + /* TUNING: sorting cost proportional to the number of output columns: */ + nCol = sqlite3LogEst((pWInfo->pSelect->pEList->nExpr+59)/30); + rSortCost = nRow + nCol; + if( nSorted>0 ){ + /* Scale the result by (Y/X) */ + rSortCost += sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66; + } /* Multiply by log(M) where M is the number of output rows. ** Use the LIMIT for M if it is smaller. Or if this sort is for ** a DISTINCT operator, M will be the number of distinct output ** rows, so fudge it downwards a bit. */ - if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimit<nRow ){ - nRow = pWInfo->iLimit; + if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 ){ + rSortCost += 10; /* TUNING: Extra 2.0x if using LIMIT */ + if( nSorted!=0 ){ + rSortCost += 6; /* TUNING: Extra 1.5x if also using partial sort */ + } + if( pWInfo->iLimit<nRow ){ + nRow = pWInfo->iLimit; + } } }else if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT) ){ /* TUNING: In the sort for a DISTINCT operator, assume that the DISTINCT ** reduces the number of output rows by a factor of 2 */ @@ -164068,7 +171613,6 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int mxChoice; /* Maximum number of simultaneous paths tracked */ int nLoop; /* Number of terms in the join */ Parse *pParse; /* Parsing context */ - sqlite3 *db; /* The database connection */ int iLoop; /* Loop counter over the terms of the join */ int ii, jj; /* Loop counters */ int mxI = 0; /* Index of next entry to replace */ @@ -164087,14 +171631,14 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ int nSpace; /* Bytes of space allocated at pSpace */ pParse = pWInfo->pParse; - db = pParse->db; nLoop = pWInfo->nLevel; /* TUNING: For simple queries, only the best path is tracked. ** For 2-way joins, the 5 best paths are followed. ** For joins of 3 or more tables, track the 10 best paths */ mxChoice = (nLoop<=1) ? 1 : (nLoop==2 ? 5 : 10); assert( nLoop<=pWInfo->pTabList->nSrc ); - WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d)\n", nRowEst)); + WHERETRACE(0x002, ("---- begin solver. (nRowEst=%d, nQueryLoop=%d)\n", + nRowEst, pParse->nQueryLoop)); /* If nRowEst is zero and there is an ORDER BY clause, ignore it.
In this ** case the purpose of this call is to estimate the number of rows returned @@ -164110,7 +171654,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ /* Allocate and initialize space for aTo, aFrom and aSortCost[] */ nSpace = (sizeof(WherePath)+sizeof(WhereLoop*)*nLoop)*mxChoice*2; nSpace += sizeof(LogEst) * nOrderBy; - pSpace = sqlite3DbMallocRawNN(db, nSpace); + pSpace = sqlite3StackAllocRawNN(pParse->db, nSpace); if( pSpace==0 ) return SQLITE_NOMEM_BKPT; aTo = (WherePath*)pSpace; aFrom = aTo+mxChoice; @@ -164160,9 +171704,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ LogEst nOut; /* Rows visited by (pFrom+pWLoop) */ LogEst rCost; /* Cost of path (pFrom+pWLoop) */ LogEst rUnsorted; /* Unsorted cost of (pFrom+pWLoop) */ - i8 isOrdered = pFrom->isOrdered; /* isOrdered for (pFrom+pWLoop) */ + i8 isOrdered; /* isOrdered for (pFrom+pWLoop) */ Bitmask maskNew; /* Mask of src visited by (..) */ - Bitmask revMask = 0; /* Mask of rev-order loops for (..) */ + Bitmask revMask; /* Mask of rev-order loops for (..) */ if( (pWLoop->prereq & ~pFrom->maskLoop)!=0 ) continue; if( (pWLoop->maskSelf & pFrom->maskLoop)!=0 ) continue; @@ -164181,7 +171725,9 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ rUnsorted = sqlite3LogEstAdd(rUnsorted, pFrom->rUnsorted); nOut = pFrom->nRow + pWLoop->nOut; maskNew = pFrom->maskLoop | pWLoop->maskSelf; + isOrdered = pFrom->isOrdered; if( isOrdered<0 ){ + revMask = 0; isOrdered = wherePathSatisfiesOrderBy(pWInfo, pWInfo->pOrderBy, pFrom, pWInfo->wctrlFlags, iLoop, pWLoop, &revMask); @@ -164194,11 +171740,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pWInfo, nRowEst, nOrderBy, isOrdered ); } - /* TUNING: Add a small extra penalty (5) to sorting as an - ** extra encouragment to the query planner to select a plan + /* TUNING: Add a small extra penalty (3) to sorting as an + ** extra encouragement to the query planner to select a plan ** where the rows emerge in the correct order without any sorting ** required. */ - rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 5; + rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3; WHERETRACE(0x002, ("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n", @@ -164359,7 +171905,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( nFrom==0 ){ sqlite3ErrorMsg(pParse, "no query solution"); - sqlite3DbFreeNN(db, pSpace); + sqlite3StackFreeNN(pParse->db, pSpace); return SQLITE_ERROR; } @@ -164395,6 +171941,10 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){ pWInfo->eDistinct = WHERE_DISTINCT_ORDERED; } + if( pWInfo->pSelect->pOrderBy + && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){ + pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr; + } }else{ pWInfo->revMask = pFrom->revLoop; if( pWInfo->nOBSat<=0 ){ @@ -164441,7 +171991,7 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){ pWInfo->nRowOut = pFrom->nRow; /* Free temporary memory and return success */ - sqlite3DbFreeNN(db, pSpace); + sqlite3StackFreeNN(pParse->db, pSpace); return SQLITE_OK; } @@ -164539,7 +172089,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){ pLoop->cId = '0'; #endif #ifdef WHERETRACE_ENABLED - if( sqlite3WhereTrace ){ + if( sqlite3WhereTrace & 0x02 ){ sqlite3DebugPrintf("whereShortCut() used to compute solution\n"); } #endif @@ -164606,6 +172156,13 @@ static void showAllWhereLoops(WhereInfo *pWInfo, WhereClause *pWC){ ** at most a single row. 
** 4) The table must not be referenced by any part of the query apart ** from its own USING or ON clause. +** 5) The table must not have an inner-join ON or USING clause if there is +** a RIGHT JOIN anywhere in the query. Otherwise the ON/USING clause +** might move from the right side to the left side of the RIGHT JOIN. +** Note: Due to (2), this condition can only arise if the table is +** the right-most table of a subquery that was flattened into the +** main query and that subquery was the right-hand operand of an +** inner join that held an ON or USING clause. ** ** For example, given: ** @@ -164631,6 +172188,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( ){ int i; Bitmask tabUsed; + int hasRightJoin; /* Preconditions checked by the caller */ assert( pWInfo->nLevel>=2 ); @@ -164645,6 +172203,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( if( pWInfo->pOrderBy ){ tabUsed |= sqlite3WhereExprListUsage(&pWInfo->sMaskSet, pWInfo->pOrderBy); } + hasRightJoin = (pWInfo->pTabList->a[0].fg.jointype & JT_LTORJ)!=0; for(i=pWInfo->nLevel-1; i>=1; i--){ WhereTerm *pTerm, *pEnd; SrcItem *pItem; @@ -164667,9 +172226,15 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin( break; } } + if( hasRightJoin + && ExprHasProperty(pTerm->pExpr, EP_InnerON) + && pTerm->pExpr->w.iJoin==pItem->iCursor + ){ + break; /* restriction (5) */ + } } if( pTerm drop loop %c not used\n", pLoop->cId)); + WHERETRACE(0xffffffff, ("-> drop loop %c not used\n", pLoop->cId)); notReady &= ~pLoop->maskSelf; for(pTerm=pWInfo->sWC.a; pTermprereqAll & pLoop->maskSelf)!=0 ){ @@ -164708,28 +172273,27 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( const WhereInfo *pWInfo ){ int i; - LogEst nSearch; + LogEst nSearch = 0; assert( pWInfo->nLevel>=2 ); assert( OptimizationEnabled(pWInfo->pParse->db, SQLITE_BloomFilter) ); - nSearch = pWInfo->a[0].pWLoop->nOut; - for(i=1; inLevel; i++){ + for(i=0; inLevel; i++){ WhereLoop *pLoop = pWInfo->a[i].pWLoop; const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ); - if( (pLoop->wsFlags & reqFlags)==reqFlags + SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; + Table *pTab = pItem->pTab; + if( (pTab->tabFlags & TF_HasStat1)==0 ) break; + pTab->tabFlags |= TF_StatsUsed; + if( i>=1 + && (pLoop->wsFlags & reqFlags)==reqFlags /* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */ && ALWAYS((pLoop->wsFlags & (WHERE_IPK|WHERE_INDEXED))!=0) ){ - SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab]; - Table *pTab = pItem->pTab; - pTab->tabFlags |= TF_StatsUsed; - if( nSearch > pTab->nRowLogEst - && (pTab->tabFlags & TF_HasStat1)!=0 - ){ + if( nSearch > pTab->nRowLogEst ){ testcase( pItem->fg.jointype & JT_LEFT ); pLoop->wsFlags |= WHERE_BLOOMFILTER; pLoop->wsFlags &= ~WHERE_IDX_ONLY; - WHERETRACE(0xffff, ( + WHERETRACE(0xffffffff, ( "-> use Bloom-filter on loop %c because there are ~%.1e " "lookups into %s which has only ~%.1e rows\n", pLoop->cId, (double)sqlite3LogEstToInt(nSearch), pTab->zName, @@ -164740,6 +172304,103 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( } } +/* +** The index pIdx is used by a query and contains one or more expressions. +** In other words pIdx is an index on an expression. iIdxCur is the cursor +** number for the index and iDataCur is the cursor number for the corresponding +** table. 
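+** For example, given a hypothetical +** +** CREATE INDEX i2 ON t1( julianday(d) ); +** SELECT julianday(d) FROM t1 WHERE julianday(d)>2460000.5; +** +** both occurrences of julianday(d) can be read directly from the first +** column of i2 instead of being recomputed from t1.d for each row.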
+** +** This routine adds IndexedExpr entries to the Parse->pIdxEpr field for +** each of the expressions in the index so that the expression code generator +** will know to replace occurrences of the indexed expression with +** references to the corresponding column of the index. +*/ +static SQLITE_NOINLINE void whereAddIndexedExpr( + Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxEpr */ + Index *pIdx, /* The index-on-expression that contains the expressions */ + int iIdxCur, /* Cursor number for pIdx */ + SrcItem *pTabItem /* The FROM clause entry for the table */ +){ + int i; + IndexedExpr *p; + Table *pTab; + assert( pIdx->bHasExpr ); + pTab = pIdx->pTable; + for(i=0; i<pIdx->nColumn; i++){ + Expr *pExpr; + int j = pIdx->aiColumn[i]; + if( j==XN_EXPR ){ + pExpr = pIdx->aColExpr->a[i].pExpr; + }else if( j>=0 && (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)!=0 ){ + pExpr = sqlite3ColumnExpr(pTab, &pTab->aCol[j]); + }else{ + continue; + } + if( sqlite3ExprIsConstant(pExpr) ) continue; + if( pExpr->op==TK_FUNCTION ){ + /* Functions that might set a subtype should not be replaced by the + ** value taken from an expression index since the index omits the + ** subtype. https://sqlite.org/forum/forumpost/68d284c86b082c3e */ + int n; + FuncDef *pDef; + sqlite3 *db = pParse->db; + assert( ExprUseXList(pExpr) ); + n = pExpr->x.pList ? pExpr->x.pList->nExpr : 0; + pDef = sqlite3FindFunction(db, pExpr->u.zToken, n, ENC(db), 0); + if( pDef==0 || (pDef->funcFlags & SQLITE_RESULT_SUBTYPE)!=0 ){ + continue; + } + } + p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr)); + if( p==0 ) break; + p->pIENext = pParse->pIdxEpr; +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x200 ){ + sqlite3DebugPrintf("New pParse->pIdxEpr term {%d,%d}\n", iIdxCur, i); + if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(pExpr); + } +#endif + p->pExpr = sqlite3ExprDup(pParse->db, pExpr, 0); + p->iDataCur = pTabItem->iCursor; + p->iIdxCur = iIdxCur; + p->iIdxCol = i; + p->bMaybeNullRow = (pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT))!=0; + if( sqlite3IndexAffinityStr(pParse->db, pIdx) ){ + p->aff = pIdx->zColAff[i]; + } +#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS + p->zIdxName = pIdx->zName; +#endif + pParse->pIdxEpr = p; + if( p->pIENext==0 ){ + void *pArg = (void*)&pParse->pIdxEpr; + sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pArg); + } + } +} + +/* +** Set the reverse-scan order mask to one for all tables in the query +** with the exception of MATERIALIZED common table expressions that have +** their own internal ORDER BY clauses. +** +** This implements the PRAGMA reverse_unordered_selects=ON setting. +** (Also SQLITE_DBCONFIG_REVERSE_SCANORDER). +*/ +static SQLITE_NOINLINE void whereReverseScanOrder(WhereInfo *pWInfo){ + int ii; + for(ii=0; ii<pWInfo->pTabList->nSrc; ii++){ + SrcItem *pItem = &pWInfo->pTabList->a[ii]; + if( !pItem->fg.isCte + || pItem->u2.pCteUse->eM10d!=M10d_Yes + || NEVER(pItem->pSelect==0) + || pItem->pSelect->pOrderBy==0 + ){ + pWInfo->revMask |= MASKBIT(ii); + } + } +} + /* ** Generate the beginning of the loop used for WHERE clause processing.
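whereAddIndexedExpr() above records one entry per indexed expression so that code generation can later fetch the precomputed value from the index cursor instead of re-evaluating the expression; sqlite3WhereEnd() (later in this patch) disables entries again by clearing their cursor numbers, and functions that may attach a subtype are skipped because the index stores only the plain value. The following toy model shows just the lookup-list mechanism; IdxExprMap, mapAdd and mapFind are invented names, and matching is by expression source text rather than by expression-tree comparison.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct IdxExprMap IdxExprMap;
struct IdxExprMap {
  const char *zExpr;   /* source text of the indexed expression */
  int iIdxCur;         /* cursor of the index that materializes it */
  int iIdxCol;         /* column of that index holding the value */
  IdxExprMap *pNext;   /* next entry, like IndexedExpr.pIENext */
};

/* Prepend an entry, the way whereAddIndexedExpr() builds pParse->pIdxEpr. */
static IdxExprMap *mapAdd(IdxExprMap *pList, const char *zExpr,
                          int iIdxCur, int iIdxCol){
  IdxExprMap *p = malloc(sizeof(*p));
  if( p==0 ) return pList;
  p->zExpr = zExpr;
  p->iIdxCur = iIdxCur;
  p->iIdxCol = iIdxCol;
  p->pNext = pList;
  return p;
}

/* Code-generation side: prefer reading the stored index column over
** re-evaluating the expression.  Entries disabled by a negative cursor
** number (as sqlite3WhereEnd does once the index cursor is gone) are
** skipped. */
static int mapFind(const IdxExprMap *p, const char *zExpr,
                   int *piCur, int *piCol){
  for(; p; p=p->pNext){
    if( p->iIdxCur>=0 && strcmp(p->zExpr, zExpr)==0 ){
      *piCur = p->iIdxCur;
      *piCol = p->iIdxCol;
      return 1;
    }
  }
  return 0;
}

int main(void){
  IdxExprMap *pMap = mapAdd(0, "lower(a)", 7, 0);
  int cur, col;
  if( mapFind(pMap, "lower(a)", &cur, &col) ){
    printf("read cursor %d column %d instead of computing lower(a)\n",
           cur, col);
  }
  free(pMap);
  return 0;
}
```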
** The return value is a pointer to an opaque structure that contains @@ -164798,7 +172459,7 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful( ** ** OUTER JOINS ** -** An outer join of tables t1 and t2 is conceptally coded as follows: +** An outer join of tables t1 and t2 is conceptually coded as follows: ** ** foreach row1 in t1 do ** flag = 0 @@ -164834,7 +172495,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( Expr *pWhere, /* The WHERE clause */ ExprList *pOrderBy, /* An ORDER BY (or GROUP BY) clause, or NULL */ ExprList *pResultSet, /* Query result set. Req'd for DISTINCT */ - Select *pLimit, /* Use this LIMIT/OFFSET clause, if any */ + Select *pSelect, /* The entire SELECT statement */ u16 wctrlFlags, /* The WHERE_* flags defined in sqliteInt.h */ int iAuxArg /* If WHERE_OR_SUBCLAUSE is set, index cursor number ** If WHERE_USE_LIMIT, then the limit amount */ @@ -164868,7 +172529,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* An ORDER/GROUP BY clause of more than 63 terms cannot be optimized */ testcase( pOrderBy && pOrderBy->nExpr==BMS-1 ); - if( pOrderBy && pOrderBy->nExpr>=BMS ) pOrderBy = 0; + if( pOrderBy && pOrderBy->nExpr>=BMS ){ + pOrderBy = 0; + wctrlFlags &= ~WHERE_WANT_DISTINCT; + } /* The number of tables in the FROM clause is limited by the number of ** bits in a Bitmask @@ -164893,7 +172557,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** field (type Bitmask) it must be aligned on an 8-byte boundary on ** some architectures. Hence the ROUND8() below. */ - nByteWInfo = ROUND8P(sizeof(WhereInfo)+(nTabList-1)*sizeof(WhereLevel)); + nByteWInfo = ROUND8P(sizeof(WhereInfo)); + if( nTabList>1 ){ + nByteWInfo = ROUND8P(nByteWInfo + (nTabList-1)*sizeof(WhereLevel)); + } pWInfo = sqlite3DbMallocRawNN(db, nByteWInfo + sizeof(WhereLoop)); if( db->mallocFailed ){ sqlite3DbFree(db, pWInfo); @@ -164903,7 +172570,9 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pWInfo->pParse = pParse; pWInfo->pTabList = pTabList; pWInfo->pOrderBy = pOrderBy; +#if WHERETRACE_ENABLED pWInfo->pWhere = pWhere; +#endif pWInfo->pResultSet = pResultSet; pWInfo->aiCurOnePass[0] = pWInfo->aiCurOnePass[1] = -1; pWInfo->nLevel = nTabList; @@ -164911,9 +172580,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( pWInfo->wctrlFlags = wctrlFlags; pWInfo->iLimit = iAuxArg; pWInfo->savedNQueryLoop = pParse->nQueryLoop; -#ifndef SQLITE_OMIT_VIRTUALTABLE - pWInfo->pLimit = pLimit; -#endif + pWInfo->pSelect = pSelect; memset(&pWInfo->nOBSat, 0, offsetof(WhereInfo,sWC) - offsetof(WhereInfo,nOBSat)); memset(&pWInfo->a[0], 0, sizeof(WhereLoop)+nTabList*sizeof(WhereLevel)); @@ -164953,7 +172620,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** ** The N-th term of the FROM clause is assigned a bitmask of 1<<N. [...] sqlite3WhereExprAnalyze(pTabList, &pWInfo->sWC); - sqlite3WhereAddLimit(&pWInfo->sWC, pLimit); + if( pSelect && pSelect->pLimit ){ + sqlite3WhereAddLimit(&pWInfo->sWC, pSelect); + } if( pParse->nErr ) goto whereBeginError; - /* Special case: WHERE terms that do not refer to any tables in the join - ** (constant expressions). Evaluate each such term, and jump over all the - ** generated code if the result is not true. + /* The False-WHERE-Term-Bypass optimization: ** - ** Do not do this if the expression contains non-deterministic functions - ** that are not within a sub-select. This is not strictly required, but - ** preserves SQLite's legacy behaviour in the following two cases: + ** If there are WHERE terms that are false, then no rows will be output, + ** so skip over all of the code generated here. ** - ** FROM ...
WHERE random()>0; -- eval random() once per row - ** FROM ... WHERE (SELECT random())>0; -- eval random() once overall + ** Conditions: + ** + ** (1) The WHERE term must not refer to any tables in the join. + ** (2) The term must not come from an ON clause on the + ** right-hand side of a LEFT or FULL JOIN. + ** (3) The term must not come from an ON clause, or there must be + ** no RIGHT or FULL OUTER joins in pTabList. + ** (4) If the expression contains non-deterministic functions + ** that are not within a sub-select. This is not required + ** for correctness but rather to preserves SQLite's legacy + ** behaviour in the following two cases: + ** + ** WHERE random()>0; -- eval random() once per row + ** WHERE (SELECT random())>0; -- eval random() just once overall + ** + ** Note that the Where term need not be a constant in order for this + ** optimization to apply, though it does need to be constant relative to + ** the current subquery (condition 1). The term might include variables + ** from outer queries so that the value of the term changes from one + ** invocation of the current subquery to the next. */ for(ii=0; ii<sWLB.pWC->nBase; ii++){ - WhereTerm *pT = &sWLB.pWC->a[ii]; + WhereTerm *pT = &sWLB.pWC->a[ii]; /* A term of the WHERE clause */ + Expr *pX; /* The expression of pT */ if( pT->wtFlags & TERM_VIRTUAL ) continue; - if( pT->prereqAll==0 && (nTabList==0 || exprIsDeterministic(pT->pExpr)) ){ - sqlite3ExprIfFalse(pParse, pT->pExpr, pWInfo->iBreak, SQLITE_JUMPIFNULL); + pX = pT->pExpr; + assert( pX!=0 ); + assert( pT->prereqAll!=0 || !ExprHasProperty(pX, EP_OuterON) ); + if( pT->prereqAll==0 /* Conditions (1) and (2) */ + && (nTabList==0 || exprIsDeterministic(pX)) /* Condition (4) */ + && !(ExprHasProperty(pX, EP_InnerON) /* Condition (3) */ + && (pTabList->a[0].fg.jointype & JT_LTORJ)!=0 ) + ){ + sqlite3ExprIfFalse(pParse, pX, pWInfo->iBreak, SQLITE_JUMPIFNULL); pT->wtFlags |= TERM_CODED; } } @@ -165023,13 +172715,13 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* Construct the WhereLoop objects */ #if defined(WHERETRACE_ENABLED) - if( sqlite3WhereTrace & 0xffff ){ + if( sqlite3WhereTrace & 0xffffffff ){ sqlite3DebugPrintf("*** Optimizer Start *** (wctrlFlags: 0x%x",wctrlFlags); if( wctrlFlags & WHERE_USE_LIMIT ){ sqlite3DebugPrintf(", limit: %d", iAuxArg); } sqlite3DebugPrintf(")\n"); - if( sqlite3WhereTrace & 0x100 ){ + if( sqlite3WhereTrace & 0x8000 ){ Select sSelect; memset(&sSelect, 0, sizeof(sSelect)); sSelect.selFlags = SF_WhereBegin; sSelect.pSrc = pTabList; sSelect.pWhere = pWhere; sSelect.pOrderBy = pOrderBy; sSelect.pEList = pResultSet; sqlite3TreeViewSelect(0, &sSelect, 0); } - } - if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ - sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n"); - sqlite3WhereClausePrint(sWLB.pWC); + if( sqlite3WhereTrace & 0x4000 ){ /* Display all WHERE clause terms */ + sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n"); + sqlite3WhereClausePrint(sWLB.pWC); + } } #endif @@ -165058,7 +172750,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( ** loops will be built using the revised truthProb values.
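A false join-independent term therefore short-circuits the entire loop nest: any term with prereqAll==0 is evaluated once, up front, and a false result jumps past all of the scan code. The sketch below makes that decision in plain C rather than in generated VDBE code; Term is an invented stand-in for WhereTerm, and only conditions (1) and (4) are modeled.

```c
#include <stdio.h>

typedef unsigned long long Bitmask;

typedef struct {
  Bitmask prereqAll;   /* tables the term depends on; 0 = join-independent */
  int isDeterministic; /* no random()-style functions involved */
  int truthValue;      /* toy result of evaluating the term once */
} Term;

int main(void){
  /* WHERE ?1>10 AND t1.a=t2.b: the first term references no table. */
  Term aTerm[] = {
    { 0,   1, 0 },     /* ?1>10: false for the current binding */
    { 0x3, 1, 1 },     /* t1.a=t2.b: needs tables 0 and 1 */
  };
  int ii;

  for(ii=0; ii<2; ii++){
    /* Conditions (1) and (4) above: no table prerequisites and
    ** deterministic.  A false hit skips the whole loop nest. */
    if( aTerm[ii].prereqAll==0
     && aTerm[ii].isDeterministic
     && !aTerm[ii].truthValue
    ){
      printf("bypass: term %d is false, emit no scan at all\n", ii);
      return 0;
    }
  }
  printf("no false constant term: generate the join loops\n");
  return 0;
}
```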
*/ if( sWLB.bldFlags2 & SQLITE_BLDF2_2NDPASS ){ WHERETRACE_ALL_LOOPS(pWInfo, sWLB.pWC); - WHERETRACE(0xffff, + WHERETRACE(0xffffffff, ("**** Redo all loop computations due to" " TERM_HIGHTRUTH changes ****\n")); while( pWInfo->pLoops ){ @@ -165078,9 +172770,20 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( wherePathSolver(pWInfo, pWInfo->nRowOut+1); if( db->mallocFailed ) goto whereBeginError; } + + /* TUNING: Assume that a DISTINCT clause on a subquery reduces + ** the output size by a factor of 8 (LogEst -30). + */ + if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT)!=0 ){ + WHERETRACE(0x0080,("nRowOut reduced from %d to %d due to DISTINCT\n", + pWInfo->nRowOut, pWInfo->nRowOut-30)); + pWInfo->nRowOut -= 30; + } + } + assert( pWInfo->pTabList!=0 ); if( pWInfo->pOrderBy==0 && (db->flags & SQLITE_ReverseOrder)!=0 ){ - pWInfo->revMask = ALLBITS; + whereReverseScanOrder(pWInfo); } if( pParse->nErr ){ goto whereBeginError; @@ -165144,11 +172847,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( } #if defined(WHERETRACE_ENABLED) - if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */ + if( sqlite3WhereTrace & 0x4000 ){ /* Display all terms of the WHERE clause */ sqlite3DebugPrintf("---- WHERE clause at end of analysis:\n"); sqlite3WhereClausePrint(sWLB.pWC); } - WHERETRACE(0xffff,("*** Optimizer Finished ***\n")); + WHERETRACE(0xffffffff,("*** Optimizer Finished ***\n")); #endif pWInfo->pParse->nQueryLoop += pWInfo->nRowOut; @@ -165180,6 +172883,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( 0!=(wctrlFlags & WHERE_ONEPASS_MULTIROW) && !IsVirtual(pTabList->a[0].pTab) && (0==(wsFlags & WHERE_MULTI_OR) || (wctrlFlags & WHERE_DUPLICATES_OK)) + && OptimizationEnabled(db, SQLITE_OnePass) )){ pWInfo->eOnePass = bOnerow ? ONEPASS_SINGLE : ONEPASS_MULTI; if( HasRowid(pTabList->a[0].pTab) && (wsFlags & WHERE_IDX_ONLY) ){ @@ -165243,7 +172947,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( assert( n<=pTab->nCol ); } #ifdef SQLITE_ENABLE_CURSOR_HINTS - if( pLoop->u.btree.pIndex!=0 ){ + if( pLoop->u.btree.pIndex!=0 && (pTab->tabFlags & TF_WithoutRowid)==0 ){ sqlite3VdbeChangeP5(v, OPFLAG_SEEKEQ|bFordelete); }else #endif @@ -165285,6 +172989,14 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( op = OP_ReopenIdx; }else{ iIndexCur = pParse->nTab++; + if( pIx->bHasExpr && OptimizationEnabled(db, SQLITE_IndexedExpr) ){ + whereAddIndexedExpr(pParse, pIx, iIndexCur, pTabItem); + } + if( pIx->pPartIdxWhere && (pTabItem->fg.jointype & JT_RIGHT)==0 ){ + wherePartIdxExpr( + pParse, pIx, pIx->pPartIdxWhere, 0, iIndexCur, pTabItem + ); + } } pLevel->iIdxCur = iIndexCur; assert( pIx!=0 ); @@ -165377,11 +173089,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( sqlite3VdbeJumpHere(v, iOnce); } } + assert( pTabList == pWInfo->pTabList ); if( (wsFlags & (WHERE_AUTO_INDEX|WHERE_BLOOMFILTER))!=0 ){ if( (wsFlags & WHERE_AUTO_INDEX)!=0 ){ #ifndef SQLITE_OMIT_AUTOMATIC_INDEX - constructAutomaticIndex(pParse, &pWInfo->sWC, - &pTabList->a[pLevel->iFrom], notReady, pLevel); + constructAutomaticIndex(pParse, &pWInfo->sWC, notReady, pLevel); #endif }else{ sqlite3ConstructBloomFilter(pWInfo, ii, pLevel, notReady); @@ -165407,11 +173119,14 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin( /* Jump here if malloc fails */ whereBeginError: if( pWInfo ){ - testcase( pWInfo->pExprMods!=0 ); - whereUndoExprMods(pWInfo); pParse->nQueryLoop = pWInfo->savedNQueryLoop; whereInfoFree(db, pWInfo); } +#ifdef WHERETRACE_ENABLED + /* Prevent harmless compiler warnings about debugging routines + ** being declared but never used 
*/ + sqlite3ShowWhereLoopList(0); +#endif /* WHERETRACE_ENABLED */ return 0; } @@ -165627,7 +173342,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ } assert( pWInfo->nLevel<=pTabList->nSrc ); - if( pWInfo->pExprMods ) whereUndoExprMods(pWInfo); for(i=0, pLevel=pWInfo->a; i<pWInfo->nLevel; i++, pLevel++){ int k, last; VdbeOp *pOp, *pLastOp; @@ -165681,10 +173395,28 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ }else{ last = pWInfo->iEndWhere; } + if( pIdx->bHasExpr ){ + IndexedExpr *p = pParse->pIdxEpr; + while( p ){ + if( p->iIdxCur==pLevel->iIdxCur ){ +#ifdef WHERETRACE_ENABLED + if( sqlite3WhereTrace & 0x200 ){ + sqlite3DebugPrintf("Disable pParse->pIdxEpr term {%d,%d}\n", + p->iIdxCur, p->iIdxCol); + if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(p->pExpr); + } +#endif + p->iDataCur = -1; + p->iIdxCur = -1; + } + p = p->pIENext; + } + } k = pLevel->addrBody + 1; #ifdef SQLITE_DEBUG if( db->flags & SQLITE_VdbeAddopTrace ){ - printf("TRANSLATE opcodes in range %d..%d\n", k, last-1); + printf("TRANSLATE cursor %d->%d in opcode range %d..%d\n", + pLevel->iTabCur, pLevel->iIdxCur, k, last-1); } /* Proof that the "+1" on the k value above is safe */ pOp = sqlite3VdbeGetOp(v, k - 1); @@ -165891,7 +173623,7 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){ ** ** These are the same built-in window functions supported by Postgres. ** Although the behaviour of aggregate window functions (functions that -** can be used as either aggregates or window funtions) allows them to +** can be used as either aggregates or window functions) allows them to ** be implemented using an API, built-in window functions are much more ** esoteric. Additionally, some window functions (e.g. nth_value()) ** may only be implemented by caching the entire partition in memory. @@ -166421,7 +174153,7 @@ static Window *windowFind(Parse *pParse, Window *pList, const char *zName){ ** is the Window object representing the associated OVER clause. This ** function updates the contents of pWin as follows: ** -** * If the OVER clause refered to a named window (as in "max(x) OVER win"), +** * If the OVER clause referred to a named window (as in "max(x) OVER win"), ** search list pList for a matching WINDOW definition, and update pWin ** accordingly. If no such WINDOW clause can be found, leave an error ** in pParse. @@ -166559,6 +174291,7 @@ static int selectWindowRewriteExprCb(Walker *pWalker, Expr *pExpr){ } /* no break */ deliberate_fall_through + case TK_IF_NULL_ROW: case TK_AGG_FUNCTION: case TK_COLUMN: { int iCol = -1; @@ -166674,7 +174407,6 @@ static ExprList *exprListAppendList( for(i=0; i<pAppend->nExpr; i++){ sqlite3 *db = pParse->db; Expr *pDup = sqlite3ExprDup(db, pAppend->a[i].pExpr, 0); - assert( pDup==0 || !ExprHasProperty(pDup, EP_MemToken) ); if( db->mallocFailed ){ sqlite3ExprDelete(db, pDup); break; } @@ -166812,7 +174544,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ assert( ExprUseXList(pWin->pOwner) ); assert( pWin->pWFunc!=0 ); pArgs = pWin->pOwner->x.pList; - if( pWin->pWFunc->funcFlags & SQLITE_FUNC_SUBTYPE ){ + if( pWin->pWFunc->funcFlags & SQLITE_SUBTYPE ){ selectWindowRewriteEList(pParse, pMWin, pSrc, pArgs, pTab, &pSublist); pWin->iArgCol = (pSublist ?
pSublist->nExpr : 0); pWin->bExprArgs = 1; @@ -166844,7 +174576,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ pSub = sqlite3SelectNew( pParse, pSublist, pSrc, pWhere, pGroupBy, pHaving, pSort, 0, 0 ); - SELECTTRACE(1,pParse,pSub, + TREETRACE(0x40,pParse,pSub, ("New window-function subquery in FROM clause of (%u/%p)\n", p->selId, p)); p->pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0); @@ -166854,6 +174586,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){ if( p->pSrc ){ Table *pTab2; p->pSrc->a[0].pSelect = pSub; + p->pSrc->a[0].fg.isCorrelated = 1; sqlite3SrcListAssignCursors(pParse, p->pSrc); pSub->selFlags |= SF_Expanded|SF_OrderByReqd; pTab2 = sqlite3ResultSetOfSelect(pParse, pSub, SQLITE_AFF_NONE); @@ -167041,7 +174774,7 @@ SQLITE_PRIVATE Window *sqlite3WindowAssemble( } /* -** Window *pWin has just been created from a WINDOW clause. Tokne pBase +** Window *pWin has just been created from a WINDOW clause. Token pBase ** is the base window. Earlier windows from the same WINDOW clause are ** stored in the linked list starting at pWin->pNextWin. This function ** either updates *pWin according to the base specification, or else @@ -167085,8 +174818,9 @@ SQLITE_PRIVATE void sqlite3WindowAttach(Parse *pParse, Expr *p, Window *pWin){ if( p ){ assert( p->op==TK_FUNCTION ); assert( pWin ); + assert( ExprIsFullSize(p) ); p->y.pWin = pWin; - ExprSetProperty(p, EP_WinFunc); + ExprSetProperty(p, EP_WinFunc|EP_FullSize); pWin->pOwner = p; if( (p->flags & EP_Distinct) && pWin->eFrmType!=TK_FILTER ){ sqlite3ErrorMsg(pParse, @@ -167347,7 +175081,7 @@ struct WindowCsrAndReg { ** ** (ORDER BY a, b GROUPS BETWEEN 2 PRECEDING AND 2 FOLLOWING) ** -** The windows functions implmentation caches the input rows in a temp +** The windows functions implementation caches the input rows in a temp ** table, sorted by "a, b" (it actually populates the cache lazily, and ** aggressively removes rows once they are no longer required, but that's ** a mere detail). It keeps three cursors open on the temp table. One @@ -167945,10 +175679,9 @@ static void windowCodeRangeTest( /* This block runs if reg1 is not NULL, but reg2 is. */ sqlite3VdbeJumpHere(v, addr); - sqlite3VdbeAddOp2(v, OP_IsNull, reg2, lbl); VdbeCoverage(v); - if( op==OP_Gt || op==OP_Ge ){ - sqlite3VdbeChangeP2(v, -1, addrDone); - } + sqlite3VdbeAddOp2(v, OP_IsNull, reg2, + (op==OP_Gt || op==OP_Ge) ? addrDone : lbl); + VdbeCoverage(v); } /* Register reg1 currently contains csr1.peerVal (the peer-value from csr1). @@ -168357,7 +176090,7 @@ static int windowExprGtZero(Parse *pParse, Expr *pExpr){ ** ** For the most part, the patterns above are adapted to support UNBOUNDED by ** assuming that it is equivalent to "infinity PRECEDING/FOLLOWING" and -** CURRENT ROW by assuming that it is equivilent to "0 PRECEDING/FOLLOWING". +** CURRENT ROW by assuming that it is equivalent to "0 PRECEDING/FOLLOWING". ** This is optimized of course - branches that will never be taken and ** conditions that are always true are omitted from the VM code. The only ** exceptional case is: @@ -168636,7 +176369,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( } /* Allocate registers for the array of values from the sub-query, the - ** samve values in record form, and the rowid used to insert said record + ** same values in record form, and the rowid used to insert said record ** into the ephemeral table. 
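The comment above describes the window-function evaluator's row cache: input rows are kept in a temp table sorted on the PARTITION BY/ORDER BY keys and walked by three cursors. The sketch below flattens that into array indices (start, current, end) computing ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING sums; it ignores the lazy population and eager trimming the comment mentions.

```c
#include <stdio.h>

int main(void){
  /* Values of the aggregated column for the cached rows; the cache is
  ** assumed already sorted on the window's PARTITION/ORDER BY keys. */
  int aRow[] = { 3, 1, 4, 1, 5, 9, 2, 6 };
  int nRow = 8;
  int cur;

  for(cur=0; cur<nRow; cur++){
    int start = (cur-2<0) ? 0 : cur-2;          /* trailing cursor */
    int end   = (cur+2>=nRow) ? nRow-1 : cur+2; /* leading cursor  */
    int i, sum = 0;
    for(i=start; i<=end; i++) sum += aRow[i];   /* frame aggregate */
    printf("current=%d frame=[%d..%d] sum=%d\n", cur, start, end, sum);
  }
  return 0;
}
```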
*/ regNew = pParse->nMem+1; pParse->nMem += nInput; @@ -168720,8 +176453,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( VdbeCoverageNeverNullIf(v, op==OP_Ge); /* NeverNull because bound */ VdbeCoverageNeverNullIf(v, op==OP_Le); /* values previously checked */ windowAggFinal(&s, 0); - sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr); windowReturnOneRow(&s); sqlite3VdbeAddOp1(v, OP_ResetSorter, s.current.csr); sqlite3VdbeAddOp2(v, OP_Goto, 0, lblWhereEnd); @@ -168733,13 +176465,10 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( } if( pMWin->eStart!=TK_UNBOUNDED ){ - sqlite3VdbeAddOp2(v, OP_Rewind, s.start.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.start.csr); } - sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1); - VdbeCoverageNeverTaken(v); - sqlite3VdbeAddOp2(v, OP_Rewind, s.end.csr, 1); - VdbeCoverageNeverTaken(v); + sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr); + sqlite3VdbeAddOp1(v, OP_Rewind, s.end.csr); if( regPeer && pOrderBy ){ sqlite3VdbeAddOp3(v, OP_Copy, regNewPeer, regPeer, pOrderBy->nExpr-1); sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.start.reg, pOrderBy->nExpr-1); @@ -168881,7 +176610,8 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( /************** End of window.c **********************************************/ /************** Begin file parse.c *******************************************/ /* This file is automatically generated by Lemon from input grammar -** source file "parse.y". */ +** source file "parse.y". +*/ /* ** 2001-09-15 ** @@ -168898,7 +176628,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep( ** The canonical source code to this file ("parse.y") is a Lemon grammar ** file that specifies the input grammar and actions to take while parsing. ** That input file is processed by Lemon to generate a C-language -** implementation of a parser for the given grammer. You might be reading +** implementation of a parser for the given grammar. You might be reading ** this comment as part of the translated C-code. Edits should be made ** to the original parse.y sources. */ @@ -169392,18 +177122,18 @@ typedef union { #define sqlite3ParserCTX_FETCH Parse *pParse=yypParser->pParse; #define sqlite3ParserCTX_STORE yypParser->pParse=pParse; #define YYFALLBACK 1 -#define YYNSTATE 576 +#define YYNSTATE 579 #define YYNRULE 405 -#define YYNRULE_WITH_ACTION 342 +#define YYNRULE_WITH_ACTION 340 #define YYNTOKEN 185 -#define YY_MAX_SHIFT 575 -#define YY_MIN_SHIFTREDUCE 835 -#define YY_MAX_SHIFTREDUCE 1239 -#define YY_ERROR_ACTION 1240 -#define YY_ACCEPT_ACTION 1241 -#define YY_NO_ACTION 1242 -#define YY_MIN_REDUCE 1243 -#define YY_MAX_REDUCE 1647 +#define YY_MAX_SHIFT 578 +#define YY_MIN_SHIFTREDUCE 838 +#define YY_MAX_SHIFTREDUCE 1242 +#define YY_ERROR_ACTION 1243 +#define YY_ACCEPT_ACTION 1244 +#define YY_NO_ACTION 1245 +#define YY_MIN_REDUCE 1246 +#define YY_MAX_REDUCE 1650 /************* End control #defines *******************************************/ #define YY_NLOOKAHEAD ((int)(sizeof(yy_lookahead)/sizeof(yy_lookahead[0]))) @@ -169470,218 +177200,218 @@ typedef union { ** yy_default[] Default action for each state. 
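The regenerated tables that follow implement the packed lookup scheme Lemon documents above: for a state and lookahead token, the candidate action sits at yy_action[yy_shift_ofst[state]+token], the hit is confirmed against yy_lookahead[], and a miss falls back to yy_default[state]. A self-contained sketch of that lookup, using tiny invented three-state tables rather than the real grammar's:

```c
#include <stdio.h>

/* Invented toy tables; the real ones are generated by Lemon. */
static const int yy_shift_ofst[] = { 0, 2, 4 };
static const int yy_action[]    = { 10, 11, 12, 13, 14, 15 };
static const int yy_lookahead[] = {  0,  1,  0,  1,  0,  1 };
static const int yy_default[]   = { 99, 98, 97 };
#define YY_ACTTAB_COUNT 6

/* action(state, token): offset into the packed table, validate with
** yy_lookahead, otherwise take the state's default action. */
static int findAction(int state, int token){
  int i = yy_shift_ofst[state] + token;
  if( i<0 || i>=YY_ACTTAB_COUNT || yy_lookahead[i]!=token ){
    return yy_default[state];
  }
  return yy_action[i];
}

int main(void){
  printf("action(state=1, token=0) = %d\n", findAction(1, 0)); /* 12 */
  printf("action(state=2, token=5) = %d\n", findAction(2, 5)); /* 97 */
  return 0;
}
```

Packing all states into one shared action array with per-state offsets is what keeps these generated tables compact even for a grammar of this size.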
** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (2098) +#define YY_ACTTAB_COUNT (2100) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 568, 208, 568, 118, 115, 229, 568, 118, 115, 229, - /* 10 */ 568, 1314, 377, 1293, 408, 562, 562, 562, 568, 409, - /* 20 */ 378, 1314, 1276, 41, 41, 41, 41, 208, 1526, 71, - /* 30 */ 71, 971, 419, 41, 41, 491, 303, 279, 303, 972, - /* 40 */ 397, 71, 71, 125, 126, 80, 1217, 1217, 1050, 1053, - /* 50 */ 1040, 1040, 123, 123, 124, 124, 124, 124, 476, 409, - /* 60 */ 1241, 1, 1, 575, 2, 1245, 550, 118, 115, 229, - /* 70 */ 317, 480, 146, 480, 524, 118, 115, 229, 529, 1327, - /* 80 */ 417, 523, 142, 125, 126, 80, 1217, 1217, 1050, 1053, - /* 90 */ 1040, 1040, 123, 123, 124, 124, 124, 124, 118, 115, - /* 100 */ 229, 327, 122, 122, 122, 122, 121, 121, 120, 120, - /* 110 */ 120, 119, 116, 444, 284, 284, 284, 284, 442, 442, - /* 120 */ 442, 1567, 376, 1569, 1192, 375, 1163, 565, 1163, 565, - /* 130 */ 409, 1567, 537, 259, 226, 444, 101, 145, 449, 316, - /* 140 */ 559, 240, 122, 122, 122, 122, 121, 121, 120, 120, - /* 150 */ 120, 119, 116, 444, 125, 126, 80, 1217, 1217, 1050, - /* 160 */ 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, 142, - /* 170 */ 294, 1192, 339, 448, 120, 120, 120, 119, 116, 444, - /* 180 */ 127, 1192, 1193, 1194, 148, 441, 440, 568, 119, 116, - /* 190 */ 444, 124, 124, 124, 124, 117, 122, 122, 122, 122, - /* 200 */ 121, 121, 120, 120, 120, 119, 116, 444, 454, 113, - /* 210 */ 13, 13, 546, 122, 122, 122, 122, 121, 121, 120, - /* 220 */ 120, 120, 119, 116, 444, 422, 316, 559, 1192, 1193, - /* 230 */ 1194, 149, 1224, 409, 1224, 124, 124, 124, 124, 122, - /* 240 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 250 */ 444, 465, 342, 1037, 1037, 1051, 1054, 125, 126, 80, - /* 260 */ 1217, 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, - /* 270 */ 124, 124, 1279, 522, 222, 1192, 568, 409, 224, 514, - /* 280 */ 175, 82, 83, 122, 122, 122, 122, 121, 121, 120, - /* 290 */ 120, 120, 119, 116, 444, 1007, 16, 16, 1192, 133, - /* 300 */ 133, 125, 126, 80, 1217, 1217, 1050, 1053, 1040, 1040, - /* 310 */ 123, 123, 124, 124, 124, 124, 122, 122, 122, 122, - /* 320 */ 121, 121, 120, 120, 120, 119, 116, 444, 1041, 546, - /* 330 */ 1192, 373, 1192, 1193, 1194, 252, 1434, 399, 504, 501, - /* 340 */ 500, 111, 560, 566, 4, 926, 926, 433, 499, 340, - /* 350 */ 460, 328, 360, 394, 1237, 1192, 1193, 1194, 563, 568, - /* 360 */ 122, 122, 122, 122, 121, 121, 120, 120, 120, 119, - /* 370 */ 116, 444, 284, 284, 369, 1580, 1607, 441, 440, 154, - /* 380 */ 409, 445, 71, 71, 1286, 565, 1221, 1192, 1193, 1194, - /* 390 */ 85, 1223, 271, 557, 543, 515, 1561, 568, 98, 1222, - /* 400 */ 6, 1278, 472, 142, 125, 126, 80, 1217, 1217, 1050, - /* 410 */ 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, 550, - /* 420 */ 13, 13, 1027, 507, 1224, 1192, 1224, 549, 109, 109, - /* 430 */ 222, 568, 1238, 175, 568, 427, 110, 197, 445, 570, - /* 440 */ 569, 430, 1552, 1017, 325, 551, 1192, 270, 287, 368, - /* 450 */ 510, 363, 509, 257, 71, 71, 543, 71, 71, 359, - /* 460 */ 316, 559, 1613, 122, 122, 122, 122, 121, 121, 120, - /* 470 */ 120, 120, 119, 116, 444, 1017, 1017, 1019, 1020, 27, - /* 480 */ 284, 284, 1192, 1193, 1194, 1158, 568, 1612, 409, 901, - /* 490 */ 190, 550, 356, 565, 550, 937, 533, 517, 1158, 516, - /* 500 */ 413, 1158, 552, 1192, 1193, 1194, 568, 544, 1554, 51, - /* 510 */ 51, 214, 125, 126, 80, 1217, 1217, 1050, 1053, 1040, - /* 520 */ 1040, 123, 123, 124, 124, 124, 124, 1192, 474, 135, - /* 530 */ 135, 
409, 284, 284, 1490, 505, 121, 121, 120, 120, - /* 540 */ 120, 119, 116, 444, 1007, 565, 518, 217, 541, 1561, - /* 550 */ 316, 559, 142, 6, 532, 125, 126, 80, 1217, 1217, - /* 560 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 570 */ 1555, 122, 122, 122, 122, 121, 121, 120, 120, 120, - /* 580 */ 119, 116, 444, 485, 1192, 1193, 1194, 482, 281, 1267, - /* 590 */ 957, 252, 1192, 373, 504, 501, 500, 1192, 340, 571, - /* 600 */ 1192, 571, 409, 292, 499, 957, 876, 191, 480, 316, - /* 610 */ 559, 384, 290, 380, 122, 122, 122, 122, 121, 121, - /* 620 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 630 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 640 */ 124, 409, 394, 1136, 1192, 869, 100, 284, 284, 1192, - /* 650 */ 1193, 1194, 373, 1093, 1192, 1193, 1194, 1192, 1193, 1194, - /* 660 */ 565, 455, 32, 373, 233, 125, 126, 80, 1217, 1217, - /* 670 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 680 */ 1433, 959, 568, 228, 958, 122, 122, 122, 122, 121, - /* 690 */ 121, 120, 120, 120, 119, 116, 444, 1158, 228, 1192, - /* 700 */ 157, 1192, 1193, 1194, 1553, 13, 13, 301, 957, 1232, - /* 710 */ 1158, 153, 409, 1158, 373, 1583, 1176, 5, 369, 1580, - /* 720 */ 429, 1238, 3, 957, 122, 122, 122, 122, 121, 121, - /* 730 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 740 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 750 */ 124, 409, 208, 567, 1192, 1028, 1192, 1193, 1194, 1192, - /* 760 */ 388, 852, 155, 1552, 286, 402, 1098, 1098, 488, 568, - /* 770 */ 465, 342, 1319, 1319, 1552, 125, 126, 80, 1217, 1217, - /* 780 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 790 */ 129, 568, 13, 13, 374, 122, 122, 122, 122, 121, - /* 800 */ 121, 120, 120, 120, 119, 116, 444, 302, 568, 453, - /* 810 */ 528, 1192, 1193, 1194, 13, 13, 1192, 1193, 1194, 1297, - /* 820 */ 463, 1267, 409, 1317, 1317, 1552, 1012, 453, 452, 200, - /* 830 */ 299, 71, 71, 1265, 122, 122, 122, 122, 121, 121, - /* 840 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 850 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 860 */ 124, 409, 227, 1073, 1158, 284, 284, 419, 312, 278, - /* 870 */ 278, 285, 285, 1419, 406, 405, 382, 1158, 565, 568, - /* 880 */ 1158, 1196, 565, 1600, 565, 125, 126, 80, 1217, 1217, - /* 890 */ 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, 124, - /* 900 */ 453, 1482, 13, 13, 1536, 122, 122, 122, 122, 121, - /* 910 */ 121, 120, 120, 120, 119, 116, 444, 201, 568, 354, - /* 920 */ 1586, 575, 2, 1245, 840, 841, 842, 1562, 317, 1212, - /* 930 */ 146, 6, 409, 255, 254, 253, 206, 1327, 9, 1196, - /* 940 */ 262, 71, 71, 424, 122, 122, 122, 122, 121, 121, - /* 950 */ 120, 120, 120, 119, 116, 444, 125, 126, 80, 1217, - /* 960 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 970 */ 124, 568, 284, 284, 568, 1213, 409, 574, 313, 1245, - /* 980 */ 349, 1296, 352, 419, 317, 565, 146, 491, 525, 1643, - /* 990 */ 395, 371, 491, 1327, 70, 70, 1295, 71, 71, 240, - /* 1000 */ 1325, 104, 80, 1217, 1217, 1050, 1053, 1040, 1040, 123, - /* 1010 */ 123, 124, 124, 124, 124, 122, 122, 122, 122, 121, - /* 1020 */ 121, 120, 120, 120, 119, 116, 444, 1114, 284, 284, - /* 1030 */ 428, 448, 1525, 1213, 439, 284, 284, 1489, 1352, 311, - /* 1040 */ 474, 565, 1115, 971, 491, 491, 217, 1263, 565, 1538, - /* 1050 */ 568, 972, 207, 568, 1027, 240, 383, 1116, 519, 122, - /* 1060 */ 122, 122, 122, 121, 121, 120, 120, 120, 119, 116, - /* 1070 */ 444, 1018, 107, 71, 71, 1017, 13, 13, 912, 568, - /* 1080 */ 1495, 568, 284, 284, 97, 526, 
491, 448, 913, 1326, - /* 1090 */ 1322, 545, 409, 284, 284, 565, 151, 209, 1495, 1497, - /* 1100 */ 262, 450, 55, 55, 56, 56, 565, 1017, 1017, 1019, - /* 1110 */ 443, 332, 409, 527, 12, 295, 125, 126, 80, 1217, - /* 1120 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1130 */ 124, 347, 409, 864, 1534, 1213, 125, 126, 80, 1217, - /* 1140 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1150 */ 124, 1137, 1641, 474, 1641, 371, 125, 114, 80, 1217, - /* 1160 */ 1217, 1050, 1053, 1040, 1040, 123, 123, 124, 124, 124, - /* 1170 */ 124, 1495, 329, 474, 331, 122, 122, 122, 122, 121, - /* 1180 */ 121, 120, 120, 120, 119, 116, 444, 203, 1419, 568, - /* 1190 */ 1294, 864, 464, 1213, 436, 122, 122, 122, 122, 121, - /* 1200 */ 121, 120, 120, 120, 119, 116, 444, 553, 1137, 1642, - /* 1210 */ 539, 1642, 15, 15, 892, 122, 122, 122, 122, 121, - /* 1220 */ 121, 120, 120, 120, 119, 116, 444, 568, 298, 538, - /* 1230 */ 1135, 1419, 1559, 1560, 1331, 409, 6, 6, 1169, 1268, - /* 1240 */ 415, 320, 284, 284, 1419, 508, 565, 525, 300, 457, - /* 1250 */ 43, 43, 568, 893, 12, 565, 330, 478, 425, 407, - /* 1260 */ 126, 80, 1217, 1217, 1050, 1053, 1040, 1040, 123, 123, - /* 1270 */ 124, 124, 124, 124, 568, 57, 57, 288, 1192, 1419, - /* 1280 */ 496, 458, 392, 392, 391, 273, 389, 1135, 1558, 849, - /* 1290 */ 1169, 407, 6, 568, 321, 1158, 470, 44, 44, 1557, - /* 1300 */ 1114, 426, 234, 6, 323, 256, 540, 256, 1158, 431, - /* 1310 */ 568, 1158, 322, 17, 487, 1115, 58, 58, 122, 122, - /* 1320 */ 122, 122, 121, 121, 120, 120, 120, 119, 116, 444, - /* 1330 */ 1116, 216, 481, 59, 59, 1192, 1193, 1194, 111, 560, - /* 1340 */ 324, 4, 236, 456, 526, 568, 237, 456, 568, 437, - /* 1350 */ 168, 556, 420, 141, 479, 563, 568, 293, 568, 1095, - /* 1360 */ 568, 293, 568, 1095, 531, 568, 872, 8, 60, 60, - /* 1370 */ 235, 61, 61, 568, 414, 568, 414, 568, 445, 62, - /* 1380 */ 62, 45, 45, 46, 46, 47, 47, 199, 49, 49, - /* 1390 */ 557, 568, 359, 568, 100, 486, 50, 50, 63, 63, - /* 1400 */ 64, 64, 561, 415, 535, 410, 568, 1027, 568, 534, - /* 1410 */ 316, 559, 316, 559, 65, 65, 14, 14, 568, 1027, - /* 1420 */ 568, 512, 932, 872, 1018, 109, 109, 931, 1017, 66, - /* 1430 */ 66, 131, 131, 110, 451, 445, 570, 569, 416, 177, - /* 1440 */ 1017, 132, 132, 67, 67, 568, 467, 568, 932, 471, - /* 1450 */ 1364, 283, 226, 931, 315, 1363, 407, 568, 459, 407, - /* 1460 */ 1017, 1017, 1019, 239, 407, 86, 213, 1350, 52, 52, - /* 1470 */ 68, 68, 1017, 1017, 1019, 1020, 27, 1585, 1180, 447, - /* 1480 */ 69, 69, 288, 97, 108, 1541, 106, 392, 392, 391, - /* 1490 */ 273, 389, 568, 879, 849, 883, 568, 111, 560, 466, - /* 1500 */ 4, 568, 152, 30, 38, 568, 1132, 234, 396, 323, - /* 1510 */ 111, 560, 527, 4, 563, 53, 53, 322, 568, 163, - /* 1520 */ 163, 568, 337, 468, 164, 164, 333, 563, 76, 76, - /* 1530 */ 568, 289, 1514, 568, 31, 1513, 568, 445, 338, 483, - /* 1540 */ 100, 54, 54, 344, 72, 72, 296, 236, 1080, 557, - /* 1550 */ 445, 879, 1360, 134, 134, 168, 73, 73, 141, 161, - /* 1560 */ 161, 1574, 557, 535, 568, 319, 568, 348, 536, 1009, - /* 1570 */ 473, 261, 261, 891, 890, 235, 535, 568, 1027, 568, - /* 1580 */ 475, 534, 261, 367, 109, 109, 521, 136, 136, 130, - /* 1590 */ 130, 1027, 110, 366, 445, 570, 569, 109, 109, 1017, - /* 1600 */ 162, 162, 156, 156, 568, 110, 1080, 445, 570, 569, - /* 1610 */ 410, 351, 1017, 568, 353, 316, 559, 568, 343, 568, - /* 1620 */ 100, 497, 357, 258, 100, 898, 899, 140, 140, 355, - /* 1630 */ 1310, 1017, 1017, 1019, 1020, 27, 139, 139, 362, 451, - /* 1640 */ 137, 137, 138, 138, 1017, 1017, 1019, 1020, 
27, 1180, - /* 1650 */ 447, 568, 372, 288, 111, 560, 1021, 4, 392, 392, - /* 1660 */ 391, 273, 389, 568, 1141, 849, 568, 1076, 568, 258, - /* 1670 */ 492, 563, 568, 211, 75, 75, 555, 962, 234, 261, - /* 1680 */ 323, 111, 560, 929, 4, 113, 77, 77, 322, 74, - /* 1690 */ 74, 42, 42, 1373, 445, 48, 48, 1418, 563, 974, - /* 1700 */ 975, 1092, 1091, 1092, 1091, 862, 557, 150, 930, 1346, - /* 1710 */ 113, 1358, 554, 1424, 1021, 1275, 1266, 1254, 236, 1253, - /* 1720 */ 1255, 445, 1593, 1343, 308, 276, 168, 309, 11, 141, - /* 1730 */ 393, 310, 232, 557, 1405, 1027, 335, 291, 1400, 219, - /* 1740 */ 336, 109, 109, 936, 297, 1410, 235, 341, 477, 110, - /* 1750 */ 502, 445, 570, 569, 1393, 1409, 1017, 400, 1293, 365, - /* 1760 */ 223, 1486, 1027, 1485, 1355, 1356, 1354, 1353, 109, 109, - /* 1770 */ 204, 1596, 1232, 558, 265, 218, 110, 205, 445, 570, - /* 1780 */ 569, 410, 387, 1017, 1533, 179, 316, 559, 1017, 1017, - /* 1790 */ 1019, 1020, 27, 230, 1531, 1229, 79, 560, 85, 4, - /* 1800 */ 418, 215, 548, 81, 84, 188, 1406, 173, 181, 461, - /* 1810 */ 451, 35, 462, 563, 183, 1017, 1017, 1019, 1020, 27, - /* 1820 */ 184, 1491, 185, 186, 495, 242, 98, 398, 1412, 36, - /* 1830 */ 1411, 484, 91, 469, 401, 1414, 445, 192, 1480, 246, - /* 1840 */ 1502, 490, 346, 277, 248, 196, 493, 511, 557, 350, - /* 1850 */ 1256, 249, 250, 403, 1313, 1312, 111, 560, 432, 4, - /* 1860 */ 1311, 1304, 93, 1611, 883, 1610, 224, 404, 434, 520, - /* 1870 */ 263, 435, 1579, 563, 1283, 1282, 364, 1027, 306, 1281, - /* 1880 */ 264, 1609, 1565, 109, 109, 370, 1303, 307, 1564, 438, - /* 1890 */ 128, 110, 1378, 445, 570, 569, 445, 546, 1017, 10, - /* 1900 */ 1466, 105, 381, 1377, 34, 572, 99, 1336, 557, 314, - /* 1910 */ 1186, 530, 272, 274, 379, 210, 1335, 547, 385, 386, - /* 1920 */ 275, 573, 1251, 1246, 411, 412, 1518, 165, 178, 1519, - /* 1930 */ 1017, 1017, 1019, 1020, 27, 1517, 1516, 1027, 78, 147, - /* 1940 */ 166, 220, 221, 109, 109, 836, 304, 167, 446, 212, - /* 1950 */ 318, 110, 231, 445, 570, 569, 144, 1090, 1017, 1088, - /* 1960 */ 326, 180, 169, 1212, 182, 334, 238, 915, 241, 1104, - /* 1970 */ 187, 170, 171, 421, 87, 88, 423, 189, 89, 90, - /* 1980 */ 172, 1107, 243, 1103, 244, 158, 18, 245, 345, 247, - /* 1990 */ 1017, 1017, 1019, 1020, 27, 261, 1096, 193, 1226, 489, - /* 2000 */ 194, 37, 366, 851, 494, 251, 195, 506, 92, 19, - /* 2010 */ 498, 358, 20, 503, 881, 361, 94, 894, 305, 159, - /* 2020 */ 513, 39, 95, 1174, 160, 1056, 966, 1143, 96, 174, - /* 2030 */ 1142, 225, 280, 282, 198, 960, 113, 1164, 1160, 260, - /* 2040 */ 21, 22, 23, 1162, 1168, 1167, 1148, 24, 33, 25, - /* 2050 */ 202, 542, 26, 100, 1071, 102, 1057, 103, 7, 1055, - /* 2060 */ 1059, 1113, 1060, 1112, 266, 267, 28, 40, 390, 1022, - /* 2070 */ 863, 112, 29, 564, 1182, 1181, 268, 176, 143, 925, - /* 2080 */ 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, 1242, - /* 2090 */ 1242, 1242, 1242, 1242, 269, 1602, 1242, 1601, + /* 0 */ 572, 210, 572, 119, 116, 231, 572, 119, 116, 231, + /* 10 */ 572, 1317, 379, 1296, 410, 566, 566, 566, 572, 411, + /* 20 */ 380, 1317, 1279, 42, 42, 42, 42, 210, 1529, 72, + /* 30 */ 72, 974, 421, 42, 42, 495, 305, 281, 305, 975, + /* 40 */ 399, 72, 72, 126, 127, 81, 1217, 1217, 1054, 1057, + /* 50 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 480, 411, + /* 60 */ 1244, 1, 1, 578, 2, 1248, 554, 119, 116, 231, + /* 70 */ 319, 484, 147, 484, 528, 119, 116, 231, 533, 1330, + /* 80 */ 419, 527, 143, 126, 127, 81, 1217, 1217, 1054, 1057, + /* 90 */ 1044, 1044, 124, 124, 125, 125, 125, 125, 119, 116, + /* 100 */ 231, 329, 123, 
123, 123, 123, 122, 122, 121, 121, + /* 110 */ 121, 120, 117, 448, 286, 286, 286, 286, 446, 446, + /* 120 */ 446, 1568, 378, 1570, 1193, 377, 1164, 569, 1164, 569, + /* 130 */ 411, 1568, 541, 261, 228, 448, 102, 146, 453, 318, + /* 140 */ 563, 242, 123, 123, 123, 123, 122, 122, 121, 121, + /* 150 */ 121, 120, 117, 448, 126, 127, 81, 1217, 1217, 1054, + /* 160 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 143, + /* 170 */ 296, 1193, 341, 452, 121, 121, 121, 120, 117, 448, + /* 180 */ 128, 1193, 1194, 1193, 149, 445, 444, 572, 120, 117, + /* 190 */ 448, 125, 125, 125, 125, 118, 123, 123, 123, 123, + /* 200 */ 122, 122, 121, 121, 121, 120, 117, 448, 458, 114, + /* 210 */ 13, 13, 550, 123, 123, 123, 123, 122, 122, 121, + /* 220 */ 121, 121, 120, 117, 448, 424, 318, 563, 1193, 1194, + /* 230 */ 1193, 150, 1225, 411, 1225, 125, 125, 125, 125, 123, + /* 240 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, + /* 250 */ 448, 469, 344, 1041, 1041, 1055, 1058, 126, 127, 81, + /* 260 */ 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, + /* 270 */ 125, 125, 1282, 526, 224, 1193, 572, 411, 226, 519, + /* 280 */ 177, 83, 84, 123, 123, 123, 123, 122, 122, 121, + /* 290 */ 121, 121, 120, 117, 448, 1010, 16, 16, 1193, 134, + /* 300 */ 134, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, + /* 310 */ 124, 124, 125, 125, 125, 125, 123, 123, 123, 123, + /* 320 */ 122, 122, 121, 121, 121, 120, 117, 448, 1045, 550, + /* 330 */ 1193, 375, 1193, 1194, 1193, 254, 1438, 401, 508, 505, + /* 340 */ 504, 112, 564, 570, 4, 929, 929, 435, 503, 342, + /* 350 */ 464, 330, 362, 396, 1238, 1193, 1194, 1193, 567, 572, + /* 360 */ 123, 123, 123, 123, 122, 122, 121, 121, 121, 120, + /* 370 */ 117, 448, 286, 286, 371, 1581, 1607, 445, 444, 155, + /* 380 */ 411, 449, 72, 72, 1289, 569, 1222, 1193, 1194, 1193, + /* 390 */ 86, 1224, 273, 561, 547, 520, 520, 572, 99, 1223, + /* 400 */ 6, 1281, 476, 143, 126, 127, 81, 1217, 1217, 1054, + /* 410 */ 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, 554, + /* 420 */ 13, 13, 1031, 511, 1225, 1193, 1225, 553, 110, 110, + /* 430 */ 224, 572, 1239, 177, 572, 429, 111, 199, 449, 573, + /* 440 */ 449, 432, 1555, 1019, 327, 555, 1193, 272, 289, 370, + /* 450 */ 514, 365, 513, 259, 72, 72, 547, 72, 72, 361, + /* 460 */ 318, 563, 1613, 123, 123, 123, 123, 122, 122, 121, + /* 470 */ 121, 121, 120, 117, 448, 1019, 1019, 1021, 1022, 28, + /* 480 */ 286, 286, 1193, 1194, 1193, 1159, 572, 1612, 411, 904, + /* 490 */ 192, 554, 358, 569, 554, 940, 537, 521, 1159, 437, + /* 500 */ 415, 1159, 556, 1193, 1194, 1193, 572, 548, 548, 52, + /* 510 */ 52, 216, 126, 127, 81, 1217, 1217, 1054, 1057, 1044, + /* 520 */ 1044, 124, 124, 125, 125, 125, 125, 1193, 478, 136, + /* 530 */ 136, 411, 286, 286, 1493, 509, 122, 122, 121, 121, + /* 540 */ 121, 120, 117, 448, 1010, 569, 522, 219, 545, 545, + /* 550 */ 318, 563, 143, 6, 536, 126, 127, 81, 1217, 1217, + /* 560 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 570 */ 1557, 123, 123, 123, 123, 122, 122, 121, 121, 121, + /* 580 */ 120, 117, 448, 489, 1193, 1194, 1193, 486, 283, 1270, + /* 590 */ 960, 254, 1193, 375, 508, 505, 504, 1193, 342, 574, + /* 600 */ 1193, 574, 411, 294, 503, 960, 879, 193, 484, 318, + /* 610 */ 563, 386, 292, 382, 123, 123, 123, 123, 122, 122, + /* 620 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 630 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 640 */ 125, 411, 396, 1139, 1193, 872, 101, 286, 286, 1193, + /* 650 */ 1194, 1193, 375, 1096, 1193, 1194, 1193, 1193, 1194, 1193, + /* 660 
*/ 569, 459, 33, 375, 235, 126, 127, 81, 1217, 1217, + /* 670 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 680 */ 1437, 962, 572, 230, 961, 123, 123, 123, 123, 122, + /* 690 */ 122, 121, 121, 121, 120, 117, 448, 1159, 230, 1193, + /* 700 */ 158, 1193, 1194, 1193, 1556, 13, 13, 303, 960, 1233, + /* 710 */ 1159, 154, 411, 1159, 375, 1584, 1177, 5, 371, 1581, + /* 720 */ 431, 1239, 3, 960, 123, 123, 123, 123, 122, 122, + /* 730 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 740 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 750 */ 125, 411, 210, 571, 1193, 1032, 1193, 1194, 1193, 1193, + /* 760 */ 390, 855, 156, 1555, 376, 404, 1101, 1101, 492, 572, + /* 770 */ 469, 344, 1322, 1322, 1555, 126, 127, 81, 1217, 1217, + /* 780 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 790 */ 130, 572, 13, 13, 532, 123, 123, 123, 123, 122, + /* 800 */ 122, 121, 121, 121, 120, 117, 448, 304, 572, 457, + /* 810 */ 229, 1193, 1194, 1193, 13, 13, 1193, 1194, 1193, 1300, + /* 820 */ 467, 1270, 411, 1320, 1320, 1555, 1015, 457, 456, 436, + /* 830 */ 301, 72, 72, 1268, 123, 123, 123, 123, 122, 122, + /* 840 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 850 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 860 */ 125, 411, 384, 1076, 1159, 286, 286, 421, 314, 280, + /* 870 */ 280, 287, 287, 461, 408, 407, 1539, 1159, 569, 572, + /* 880 */ 1159, 1196, 569, 409, 569, 126, 127, 81, 1217, 1217, + /* 890 */ 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, 125, + /* 900 */ 457, 1485, 13, 13, 1541, 123, 123, 123, 123, 122, + /* 910 */ 122, 121, 121, 121, 120, 117, 448, 202, 572, 462, + /* 920 */ 1587, 578, 2, 1248, 843, 844, 845, 1563, 319, 409, + /* 930 */ 147, 6, 411, 257, 256, 255, 208, 1330, 9, 1196, + /* 940 */ 264, 72, 72, 1436, 123, 123, 123, 123, 122, 122, + /* 950 */ 121, 121, 121, 120, 117, 448, 126, 127, 81, 1217, + /* 960 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 970 */ 125, 572, 286, 286, 572, 1213, 411, 577, 315, 1248, + /* 980 */ 421, 371, 1581, 356, 319, 569, 147, 495, 529, 1644, + /* 990 */ 397, 935, 495, 1330, 71, 71, 934, 72, 72, 242, + /* 1000 */ 1328, 105, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, + /* 1010 */ 124, 125, 125, 125, 125, 123, 123, 123, 123, 122, + /* 1020 */ 122, 121, 121, 121, 120, 117, 448, 1117, 286, 286, + /* 1030 */ 1422, 452, 1528, 1213, 443, 286, 286, 1492, 1355, 313, + /* 1040 */ 478, 569, 1118, 454, 351, 495, 354, 1266, 569, 209, + /* 1050 */ 572, 418, 179, 572, 1031, 242, 385, 1119, 523, 123, + /* 1060 */ 123, 123, 123, 122, 122, 121, 121, 121, 120, 117, + /* 1070 */ 448, 1020, 108, 72, 72, 1019, 13, 13, 915, 572, + /* 1080 */ 1498, 572, 286, 286, 98, 530, 1537, 452, 916, 1334, + /* 1090 */ 1329, 203, 411, 286, 286, 569, 152, 211, 1498, 1500, + /* 1100 */ 426, 569, 56, 56, 57, 57, 569, 1019, 1019, 1021, + /* 1110 */ 447, 572, 411, 531, 12, 297, 126, 127, 81, 1217, + /* 1120 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1130 */ 125, 572, 411, 867, 15, 15, 126, 127, 81, 1217, + /* 1140 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1150 */ 125, 373, 529, 264, 44, 44, 126, 115, 81, 1217, + /* 1160 */ 1217, 1054, 1057, 1044, 1044, 124, 124, 125, 125, 125, + /* 1170 */ 125, 1498, 478, 1271, 417, 123, 123, 123, 123, 122, + /* 1180 */ 122, 121, 121, 121, 120, 117, 448, 205, 1213, 495, + /* 1190 */ 430, 867, 468, 322, 495, 123, 123, 123, 123, 122, + /* 1200 */ 122, 121, 121, 121, 120, 117, 448, 572, 557, 1140, + /* 1210 */ 1642, 1422, 1642, 543, 
572, 123, 123, 123, 123, 122, + /* 1220 */ 122, 121, 121, 121, 120, 117, 448, 572, 1422, 572, + /* 1230 */ 13, 13, 542, 323, 1325, 411, 334, 58, 58, 349, + /* 1240 */ 1422, 1170, 326, 286, 286, 549, 1213, 300, 895, 530, + /* 1250 */ 45, 45, 59, 59, 1140, 1643, 569, 1643, 565, 417, + /* 1260 */ 127, 81, 1217, 1217, 1054, 1057, 1044, 1044, 124, 124, + /* 1270 */ 125, 125, 125, 125, 1367, 373, 500, 290, 1193, 512, + /* 1280 */ 1366, 427, 394, 394, 393, 275, 391, 896, 1138, 852, + /* 1290 */ 478, 258, 1422, 1170, 463, 1159, 12, 331, 428, 333, + /* 1300 */ 1117, 460, 236, 258, 325, 460, 544, 1544, 1159, 1098, + /* 1310 */ 491, 1159, 324, 1098, 440, 1118, 335, 516, 123, 123, + /* 1320 */ 123, 123, 122, 122, 121, 121, 121, 120, 117, 448, + /* 1330 */ 1119, 318, 563, 1138, 572, 1193, 1194, 1193, 112, 564, + /* 1340 */ 201, 4, 238, 433, 935, 490, 285, 228, 1517, 934, + /* 1350 */ 170, 560, 572, 142, 1516, 567, 572, 60, 60, 572, + /* 1360 */ 416, 572, 441, 572, 535, 302, 875, 8, 487, 572, + /* 1370 */ 237, 572, 416, 572, 485, 61, 61, 572, 449, 62, + /* 1380 */ 62, 332, 63, 63, 46, 46, 47, 47, 361, 572, + /* 1390 */ 561, 572, 48, 48, 50, 50, 51, 51, 572, 295, + /* 1400 */ 64, 64, 482, 295, 539, 412, 471, 1031, 572, 538, + /* 1410 */ 318, 563, 65, 65, 66, 66, 409, 475, 572, 1031, + /* 1420 */ 572, 14, 14, 875, 1020, 110, 110, 409, 1019, 572, + /* 1430 */ 474, 67, 67, 111, 455, 449, 573, 449, 98, 317, + /* 1440 */ 1019, 132, 132, 133, 133, 572, 1561, 572, 974, 409, + /* 1450 */ 6, 1562, 68, 68, 1560, 6, 975, 572, 6, 1559, + /* 1460 */ 1019, 1019, 1021, 6, 346, 218, 101, 531, 53, 53, + /* 1470 */ 69, 69, 1019, 1019, 1021, 1022, 28, 1586, 1181, 451, + /* 1480 */ 70, 70, 290, 87, 215, 31, 1363, 394, 394, 393, + /* 1490 */ 275, 391, 350, 109, 852, 107, 572, 112, 564, 483, + /* 1500 */ 4, 1212, 572, 239, 153, 572, 39, 236, 1299, 325, + /* 1510 */ 112, 564, 1298, 4, 567, 572, 32, 324, 572, 54, + /* 1520 */ 54, 572, 1135, 353, 398, 165, 165, 567, 166, 166, + /* 1530 */ 572, 291, 355, 572, 17, 357, 572, 449, 77, 77, + /* 1540 */ 1313, 55, 55, 1297, 73, 73, 572, 238, 470, 561, + /* 1550 */ 449, 472, 364, 135, 135, 170, 74, 74, 142, 163, + /* 1560 */ 163, 374, 561, 539, 572, 321, 572, 886, 540, 137, + /* 1570 */ 137, 339, 1353, 422, 298, 237, 539, 572, 1031, 572, + /* 1580 */ 340, 538, 101, 369, 110, 110, 162, 131, 131, 164, + /* 1590 */ 164, 1031, 111, 368, 449, 573, 449, 110, 110, 1019, + /* 1600 */ 157, 157, 141, 141, 572, 111, 572, 449, 573, 449, + /* 1610 */ 412, 288, 1019, 572, 882, 318, 563, 572, 219, 572, + /* 1620 */ 241, 1012, 477, 263, 263, 894, 893, 140, 140, 138, + /* 1630 */ 138, 1019, 1019, 1021, 1022, 28, 139, 139, 525, 455, + /* 1640 */ 76, 76, 78, 78, 1019, 1019, 1021, 1022, 28, 1181, + /* 1650 */ 451, 572, 1083, 290, 112, 564, 1575, 4, 394, 394, + /* 1660 */ 393, 275, 391, 572, 1023, 852, 572, 479, 345, 263, + /* 1670 */ 101, 567, 882, 1376, 75, 75, 1421, 501, 236, 260, + /* 1680 */ 325, 112, 564, 359, 4, 101, 43, 43, 324, 49, + /* 1690 */ 49, 901, 902, 161, 449, 101, 977, 978, 567, 1079, + /* 1700 */ 1349, 260, 965, 932, 263, 114, 561, 1095, 517, 1095, + /* 1710 */ 1083, 1094, 865, 1094, 151, 933, 1144, 114, 238, 1361, + /* 1720 */ 558, 449, 1023, 559, 1426, 1278, 170, 1269, 1257, 142, + /* 1730 */ 1601, 1256, 1258, 561, 1594, 1031, 496, 278, 213, 1346, + /* 1740 */ 310, 110, 110, 939, 311, 312, 237, 11, 234, 111, + /* 1750 */ 221, 449, 573, 449, 293, 395, 1019, 1408, 337, 1403, + /* 1760 */ 1396, 338, 1031, 299, 343, 1413, 1412, 481, 110, 110, + /* 1770 */ 506, 402, 225, 1296, 206, 367, 111, 
1358, 449, 573, + /* 1780 */ 449, 412, 1359, 1019, 1489, 1488, 318, 563, 1019, 1019, + /* 1790 */ 1021, 1022, 28, 562, 207, 220, 80, 564, 389, 4, + /* 1800 */ 1597, 1357, 552, 1356, 1233, 181, 267, 232, 1536, 1534, + /* 1810 */ 455, 1230, 420, 567, 82, 1019, 1019, 1021, 1022, 28, + /* 1820 */ 86, 217, 85, 1494, 190, 175, 183, 465, 185, 466, + /* 1830 */ 36, 1409, 186, 187, 188, 499, 449, 244, 37, 99, + /* 1840 */ 400, 1415, 1414, 488, 1417, 194, 473, 403, 561, 1483, + /* 1850 */ 248, 92, 1505, 494, 198, 279, 112, 564, 250, 4, + /* 1860 */ 348, 497, 405, 352, 1259, 251, 252, 515, 1316, 434, + /* 1870 */ 1315, 1314, 94, 567, 1307, 886, 1306, 1031, 226, 406, + /* 1880 */ 1611, 1610, 438, 110, 110, 1580, 1286, 524, 439, 308, + /* 1890 */ 266, 111, 1285, 449, 573, 449, 449, 309, 1019, 366, + /* 1900 */ 1284, 1609, 265, 1566, 1565, 442, 372, 1381, 561, 129, + /* 1910 */ 550, 1380, 10, 1470, 383, 106, 316, 551, 100, 35, + /* 1920 */ 534, 575, 212, 1339, 381, 387, 1187, 1338, 274, 276, + /* 1930 */ 1019, 1019, 1021, 1022, 28, 277, 413, 1031, 576, 1254, + /* 1940 */ 388, 1521, 1249, 110, 110, 167, 1522, 168, 148, 1520, + /* 1950 */ 1519, 111, 306, 449, 573, 449, 222, 223, 1019, 839, + /* 1960 */ 169, 79, 450, 214, 414, 233, 320, 145, 1093, 1091, + /* 1970 */ 328, 182, 171, 1212, 918, 184, 240, 336, 243, 1107, + /* 1980 */ 189, 172, 173, 423, 425, 88, 180, 191, 89, 90, + /* 1990 */ 1019, 1019, 1021, 1022, 28, 91, 174, 1110, 245, 1106, + /* 2000 */ 246, 159, 18, 247, 347, 1099, 263, 195, 1227, 493, + /* 2010 */ 249, 196, 38, 854, 498, 368, 253, 360, 897, 197, + /* 2020 */ 502, 93, 19, 20, 507, 884, 363, 510, 95, 307, + /* 2030 */ 160, 96, 518, 97, 1175, 1060, 1146, 40, 21, 227, + /* 2040 */ 176, 1145, 282, 284, 969, 200, 963, 114, 262, 1165, + /* 2050 */ 22, 23, 24, 1161, 1169, 25, 1163, 1150, 34, 26, + /* 2060 */ 1168, 546, 27, 204, 101, 103, 104, 1074, 7, 1061, + /* 2070 */ 1059, 1063, 1116, 1064, 1115, 268, 269, 29, 41, 270, + /* 2080 */ 1024, 866, 113, 30, 568, 392, 1183, 144, 178, 1182, + /* 2090 */ 271, 928, 1245, 1245, 1245, 1245, 1245, 1245, 1245, 1602, }; static const YYCODETYPE yy_lookahead[] = { /* 0 */ 193, 193, 193, 274, 275, 276, 193, 274, 275, 276, @@ -169760,7 +177490,7 @@ static const YYCODETYPE yy_lookahead[] = { /* 730 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 740 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 750 */ 57, 19, 193, 193, 59, 23, 116, 117, 118, 59, - /* 760 */ 201, 21, 241, 304, 22, 206, 127, 128, 129, 193, + /* 760 */ 201, 21, 241, 304, 193, 206, 127, 128, 129, 193, /* 770 */ 128, 129, 235, 236, 304, 43, 44, 45, 46, 47, /* 780 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, /* 790 */ 22, 193, 216, 217, 193, 102, 103, 104, 105, 106, @@ -169771,129 +177501,129 @@ static const YYCODETYPE yy_lookahead[] = { /* 840 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 850 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 860 */ 57, 19, 193, 123, 76, 239, 240, 193, 253, 239, - /* 870 */ 240, 239, 240, 193, 106, 107, 193, 89, 252, 193, - /* 880 */ 92, 59, 252, 141, 252, 43, 44, 45, 46, 47, + /* 870 */ 240, 239, 240, 244, 106, 107, 193, 89, 252, 193, + /* 880 */ 92, 59, 252, 254, 252, 43, 44, 45, 46, 47, /* 890 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, /* 900 */ 284, 161, 216, 217, 193, 102, 103, 104, 105, 106, - /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 16, - /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 25, + /* 910 */ 107, 108, 109, 110, 111, 112, 113, 231, 193, 244, + /* 920 */ 187, 188, 189, 190, 7, 8, 9, 309, 195, 254, /* 930 */ 197, 313, 19, 127, 128, 
129, 262, 204, 22, 117, - /* 940 */ 24, 216, 217, 263, 102, 103, 104, 105, 106, 107, + /* 940 */ 24, 216, 217, 273, 102, 103, 104, 105, 106, 107, /* 950 */ 108, 109, 110, 111, 112, 113, 43, 44, 45, 46, /* 960 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, /* 970 */ 57, 193, 239, 240, 193, 59, 19, 188, 253, 190, - /* 980 */ 77, 226, 79, 193, 195, 252, 197, 193, 19, 301, - /* 990 */ 302, 193, 193, 204, 216, 217, 226, 216, 217, 266, + /* 980 */ 193, 311, 312, 16, 195, 252, 197, 193, 19, 301, + /* 990 */ 302, 135, 193, 204, 216, 217, 140, 216, 217, 266, /* 1000 */ 204, 159, 45, 46, 47, 48, 49, 50, 51, 52, /* 1010 */ 53, 54, 55, 56, 57, 102, 103, 104, 105, 106, /* 1020 */ 107, 108, 109, 110, 111, 112, 113, 12, 239, 240, - /* 1030 */ 232, 298, 238, 117, 253, 239, 240, 238, 259, 260, - /* 1040 */ 193, 252, 27, 31, 193, 193, 142, 204, 252, 193, - /* 1050 */ 193, 39, 262, 193, 100, 266, 278, 42, 204, 102, + /* 1030 */ 193, 298, 238, 117, 253, 239, 240, 238, 259, 260, + /* 1040 */ 193, 252, 27, 193, 77, 193, 79, 204, 252, 262, + /* 1050 */ 193, 299, 300, 193, 100, 266, 278, 42, 204, 102, /* 1060 */ 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, /* 1070 */ 113, 117, 159, 216, 217, 121, 216, 217, 63, 193, - /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 238, + /* 1080 */ 193, 193, 239, 240, 115, 116, 193, 298, 73, 240, /* 1090 */ 238, 231, 19, 239, 240, 252, 22, 24, 211, 212, - /* 1100 */ 24, 193, 216, 217, 216, 217, 252, 153, 154, 155, - /* 1110 */ 253, 16, 19, 144, 213, 268, 43, 44, 45, 46, + /* 1100 */ 263, 252, 216, 217, 216, 217, 252, 153, 154, 155, + /* 1110 */ 253, 193, 19, 144, 213, 268, 43, 44, 45, 46, /* 1120 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1130 */ 57, 238, 19, 59, 193, 59, 43, 44, 45, 46, + /* 1130 */ 57, 193, 19, 59, 216, 217, 43, 44, 45, 46, /* 1140 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1150 */ 57, 22, 23, 193, 25, 193, 43, 44, 45, 46, + /* 1150 */ 57, 193, 19, 24, 216, 217, 43, 44, 45, 46, /* 1160 */ 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, - /* 1170 */ 57, 284, 77, 193, 79, 102, 103, 104, 105, 106, - /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 193, 193, - /* 1190 */ 193, 117, 291, 117, 232, 102, 103, 104, 105, 106, - /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 204, 22, 23, - /* 1210 */ 66, 25, 216, 217, 35, 102, 103, 104, 105, 106, - /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 268, 85, - /* 1230 */ 101, 193, 309, 309, 240, 19, 313, 313, 94, 208, - /* 1240 */ 209, 193, 239, 240, 193, 66, 252, 19, 268, 244, - /* 1250 */ 216, 217, 193, 74, 213, 252, 161, 19, 263, 254, + /* 1170 */ 57, 284, 193, 208, 209, 102, 103, 104, 105, 106, + /* 1180 */ 107, 108, 109, 110, 111, 112, 113, 286, 59, 193, + /* 1190 */ 232, 117, 291, 193, 193, 102, 103, 104, 105, 106, + /* 1200 */ 107, 108, 109, 110, 111, 112, 113, 193, 204, 22, + /* 1210 */ 23, 193, 25, 66, 193, 102, 103, 104, 105, 106, + /* 1220 */ 107, 108, 109, 110, 111, 112, 113, 193, 193, 193, + /* 1230 */ 216, 217, 85, 193, 238, 19, 16, 216, 217, 238, + /* 1240 */ 193, 94, 193, 239, 240, 231, 117, 268, 35, 116, + /* 1250 */ 216, 217, 216, 217, 22, 23, 252, 25, 208, 209, /* 1260 */ 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, - /* 1270 */ 54, 55, 56, 57, 193, 216, 217, 5, 59, 193, - /* 1280 */ 19, 244, 10, 11, 12, 13, 14, 101, 309, 17, - /* 1290 */ 146, 254, 313, 193, 193, 76, 115, 216, 217, 309, - /* 1300 */ 12, 263, 30, 313, 32, 46, 87, 46, 89, 130, - /* 1310 */ 193, 92, 40, 22, 263, 27, 216, 217, 102, 103, + /* 1270 */ 54, 55, 56, 57, 193, 193, 19, 5, 59, 66, + /* 1280 */ 193, 263, 10, 11, 12, 13, 14, 74, 
101, 17, + /* 1290 */ 193, 46, 193, 146, 193, 76, 213, 77, 263, 79, + /* 1300 */ 12, 260, 30, 46, 32, 264, 87, 193, 89, 29, + /* 1310 */ 263, 92, 40, 33, 232, 27, 193, 108, 102, 103, /* 1320 */ 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, - /* 1330 */ 42, 150, 291, 216, 217, 116, 117, 118, 19, 20, - /* 1340 */ 193, 22, 70, 260, 116, 193, 24, 264, 193, 263, - /* 1350 */ 78, 63, 61, 81, 116, 36, 193, 260, 193, 29, - /* 1360 */ 193, 264, 193, 33, 145, 193, 59, 48, 216, 217, - /* 1370 */ 98, 216, 217, 193, 115, 193, 115, 193, 59, 216, - /* 1380 */ 217, 216, 217, 216, 217, 216, 217, 255, 216, 217, - /* 1390 */ 71, 193, 131, 193, 25, 65, 216, 217, 216, 217, - /* 1400 */ 216, 217, 208, 209, 85, 133, 193, 100, 193, 90, - /* 1410 */ 138, 139, 138, 139, 216, 217, 216, 217, 193, 100, - /* 1420 */ 193, 108, 135, 116, 117, 106, 107, 140, 121, 216, - /* 1430 */ 217, 216, 217, 114, 162, 116, 117, 118, 299, 300, - /* 1440 */ 121, 216, 217, 216, 217, 193, 244, 193, 135, 244, - /* 1450 */ 193, 256, 257, 140, 244, 193, 254, 193, 193, 254, - /* 1460 */ 153, 154, 155, 141, 254, 149, 150, 258, 216, 217, + /* 1330 */ 42, 138, 139, 101, 193, 116, 117, 118, 19, 20, + /* 1340 */ 255, 22, 70, 130, 135, 65, 256, 257, 193, 140, + /* 1350 */ 78, 63, 193, 81, 193, 36, 193, 216, 217, 193, + /* 1360 */ 115, 193, 263, 193, 145, 268, 59, 48, 193, 193, + /* 1370 */ 98, 193, 115, 193, 291, 216, 217, 193, 59, 216, + /* 1380 */ 217, 161, 216, 217, 216, 217, 216, 217, 131, 193, + /* 1390 */ 71, 193, 216, 217, 216, 217, 216, 217, 193, 260, + /* 1400 */ 216, 217, 19, 264, 85, 133, 244, 100, 193, 90, + /* 1410 */ 138, 139, 216, 217, 216, 217, 254, 244, 193, 100, + /* 1420 */ 193, 216, 217, 116, 117, 106, 107, 254, 121, 193, + /* 1430 */ 115, 216, 217, 114, 162, 116, 117, 118, 115, 244, + /* 1440 */ 121, 216, 217, 216, 217, 193, 309, 193, 31, 254, + /* 1450 */ 313, 309, 216, 217, 309, 313, 39, 193, 313, 309, + /* 1460 */ 153, 154, 155, 313, 193, 150, 25, 144, 216, 217, /* 1470 */ 216, 217, 153, 154, 155, 156, 157, 0, 1, 2, - /* 1480 */ 216, 217, 5, 115, 158, 193, 160, 10, 11, 12, - /* 1490 */ 13, 14, 193, 59, 17, 126, 193, 19, 20, 129, - /* 1500 */ 22, 193, 22, 22, 24, 193, 23, 30, 25, 32, - /* 1510 */ 19, 20, 144, 22, 36, 216, 217, 40, 193, 216, - /* 1520 */ 217, 193, 152, 129, 216, 217, 193, 36, 216, 217, - /* 1530 */ 193, 99, 193, 193, 53, 193, 193, 59, 23, 193, - /* 1540 */ 25, 216, 217, 193, 216, 217, 152, 70, 59, 71, - /* 1550 */ 59, 117, 193, 216, 217, 78, 216, 217, 81, 216, - /* 1560 */ 217, 318, 71, 85, 193, 133, 193, 193, 90, 23, - /* 1570 */ 23, 25, 25, 120, 121, 98, 85, 193, 100, 193, - /* 1580 */ 23, 90, 25, 121, 106, 107, 19, 216, 217, 216, + /* 1480 */ 216, 217, 5, 149, 150, 22, 193, 10, 11, 12, + /* 1490 */ 13, 14, 193, 158, 17, 160, 193, 19, 20, 116, + /* 1500 */ 22, 25, 193, 24, 22, 193, 24, 30, 226, 32, + /* 1510 */ 19, 20, 226, 22, 36, 193, 53, 40, 193, 216, + /* 1520 */ 217, 193, 23, 193, 25, 216, 217, 36, 216, 217, + /* 1530 */ 193, 99, 193, 193, 22, 193, 193, 59, 216, 217, + /* 1540 */ 193, 216, 217, 193, 216, 217, 193, 70, 129, 71, + /* 1550 */ 59, 129, 193, 216, 217, 78, 216, 217, 81, 216, + /* 1560 */ 217, 193, 71, 85, 193, 133, 193, 126, 90, 216, + /* 1570 */ 217, 152, 258, 61, 152, 98, 85, 193, 100, 193, + /* 1580 */ 23, 90, 25, 121, 106, 107, 23, 216, 217, 216, /* 1590 */ 217, 100, 114, 131, 116, 117, 118, 106, 107, 121, - /* 1600 */ 216, 217, 216, 217, 193, 114, 117, 116, 117, 118, - /* 1610 */ 133, 193, 121, 193, 193, 138, 139, 193, 23, 193, - /* 1620 */ 25, 23, 23, 25, 25, 7, 8, 216, 217, 193, - 
/* 1630 */ 193, 153, 154, 155, 156, 157, 216, 217, 193, 162, + /* 1600 */ 216, 217, 216, 217, 193, 114, 193, 116, 117, 118, + /* 1610 */ 133, 22, 121, 193, 59, 138, 139, 193, 142, 193, + /* 1620 */ 141, 23, 23, 25, 25, 120, 121, 216, 217, 216, + /* 1630 */ 217, 153, 154, 155, 156, 157, 216, 217, 19, 162, /* 1640 */ 216, 217, 216, 217, 153, 154, 155, 156, 157, 1, - /* 1650 */ 2, 193, 193, 5, 19, 20, 59, 22, 10, 11, - /* 1660 */ 12, 13, 14, 193, 97, 17, 193, 23, 193, 25, - /* 1670 */ 288, 36, 193, 242, 216, 217, 236, 23, 30, 25, + /* 1650 */ 2, 193, 59, 5, 19, 20, 318, 22, 10, 11, + /* 1660 */ 12, 13, 14, 193, 59, 17, 193, 23, 23, 25, + /* 1670 */ 25, 36, 117, 193, 216, 217, 193, 23, 30, 25, /* 1680 */ 32, 19, 20, 23, 22, 25, 216, 217, 40, 216, - /* 1690 */ 217, 216, 217, 193, 59, 216, 217, 193, 36, 83, - /* 1700 */ 84, 153, 153, 155, 155, 23, 71, 25, 23, 193, - /* 1710 */ 25, 193, 193, 193, 117, 193, 193, 193, 70, 193, - /* 1720 */ 193, 59, 193, 255, 255, 287, 78, 255, 243, 81, - /* 1730 */ 191, 255, 297, 71, 271, 100, 293, 245, 267, 214, - /* 1740 */ 246, 106, 107, 108, 246, 271, 98, 245, 293, 114, - /* 1750 */ 220, 116, 117, 118, 267, 271, 121, 271, 225, 219, - /* 1760 */ 229, 219, 100, 219, 259, 259, 259, 259, 106, 107, - /* 1770 */ 249, 196, 60, 280, 141, 243, 114, 249, 116, 117, - /* 1780 */ 118, 133, 245, 121, 200, 297, 138, 139, 153, 154, - /* 1790 */ 155, 156, 157, 297, 200, 38, 19, 20, 151, 22, - /* 1800 */ 200, 150, 140, 294, 294, 22, 272, 43, 234, 18, - /* 1810 */ 162, 270, 200, 36, 237, 153, 154, 155, 156, 157, - /* 1820 */ 237, 283, 237, 237, 18, 199, 149, 246, 272, 270, - /* 1830 */ 272, 200, 158, 246, 246, 234, 59, 234, 246, 199, - /* 1840 */ 290, 62, 289, 200, 199, 22, 221, 115, 71, 200, - /* 1850 */ 200, 199, 199, 221, 218, 218, 19, 20, 64, 22, - /* 1860 */ 218, 227, 22, 224, 126, 224, 165, 221, 24, 305, - /* 1870 */ 200, 113, 312, 36, 218, 220, 218, 100, 282, 218, - /* 1880 */ 91, 218, 317, 106, 107, 221, 227, 282, 317, 82, - /* 1890 */ 148, 114, 265, 116, 117, 118, 59, 145, 121, 22, - /* 1900 */ 277, 158, 200, 265, 25, 202, 147, 250, 71, 279, - /* 1910 */ 13, 146, 194, 194, 249, 248, 250, 140, 247, 246, - /* 1920 */ 6, 192, 192, 192, 303, 303, 213, 207, 300, 213, - /* 1930 */ 153, 154, 155, 156, 157, 213, 213, 100, 213, 222, - /* 1940 */ 207, 214, 214, 106, 107, 4, 222, 207, 3, 22, - /* 1950 */ 163, 114, 15, 116, 117, 118, 16, 23, 121, 23, - /* 1960 */ 139, 151, 130, 25, 142, 16, 24, 20, 144, 1, - /* 1970 */ 142, 130, 130, 61, 53, 53, 37, 151, 53, 53, - /* 1980 */ 130, 116, 34, 1, 141, 5, 22, 115, 161, 141, - /* 1990 */ 153, 154, 155, 156, 157, 25, 68, 68, 75, 41, - /* 2000 */ 115, 24, 131, 20, 19, 125, 22, 96, 22, 22, - /* 2010 */ 67, 23, 22, 67, 59, 24, 22, 28, 67, 23, - /* 2020 */ 22, 22, 149, 23, 23, 23, 116, 23, 25, 37, - /* 2030 */ 97, 141, 23, 23, 22, 143, 25, 75, 88, 34, - /* 2040 */ 34, 34, 34, 86, 75, 93, 23, 34, 22, 34, - /* 2050 */ 25, 24, 34, 25, 23, 142, 23, 142, 44, 23, - /* 2060 */ 23, 23, 11, 23, 25, 22, 22, 22, 15, 23, - /* 2070 */ 23, 22, 22, 25, 1, 1, 141, 25, 23, 135, - /* 2080 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2090 */ 319, 319, 319, 319, 141, 141, 319, 141, 319, 319, + /* 1690 */ 217, 7, 8, 23, 59, 25, 83, 84, 36, 23, + /* 1700 */ 193, 25, 23, 23, 25, 25, 71, 153, 145, 155, + /* 1710 */ 117, 153, 23, 155, 25, 23, 97, 25, 70, 193, + /* 1720 */ 193, 59, 117, 236, 193, 193, 78, 193, 193, 81, + /* 1730 */ 141, 193, 193, 71, 193, 100, 288, 287, 242, 255, + /* 1740 */ 255, 106, 107, 108, 255, 255, 98, 243, 297, 114, + /* 1750 */ 
214, 116, 117, 118, 245, 191, 121, 271, 293, 267, + /* 1760 */ 267, 246, 100, 246, 245, 271, 271, 293, 106, 107, + /* 1770 */ 220, 271, 229, 225, 249, 219, 114, 259, 116, 117, + /* 1780 */ 118, 133, 259, 121, 219, 219, 138, 139, 153, 154, + /* 1790 */ 155, 156, 157, 280, 249, 243, 19, 20, 245, 22, + /* 1800 */ 196, 259, 140, 259, 60, 297, 141, 297, 200, 200, + /* 1810 */ 162, 38, 200, 36, 294, 153, 154, 155, 156, 157, + /* 1820 */ 151, 150, 294, 283, 22, 43, 234, 18, 237, 200, + /* 1830 */ 270, 272, 237, 237, 237, 18, 59, 199, 270, 149, + /* 1840 */ 246, 272, 272, 200, 234, 234, 246, 246, 71, 246, + /* 1850 */ 199, 158, 290, 62, 22, 200, 19, 20, 199, 22, + /* 1860 */ 289, 221, 221, 200, 200, 199, 199, 115, 218, 64, + /* 1870 */ 218, 218, 22, 36, 227, 126, 227, 100, 165, 221, + /* 1880 */ 224, 224, 24, 106, 107, 312, 218, 305, 113, 282, + /* 1890 */ 91, 114, 220, 116, 117, 118, 59, 282, 121, 218, + /* 1900 */ 218, 218, 200, 317, 317, 82, 221, 265, 71, 148, + /* 1910 */ 145, 265, 22, 277, 200, 158, 279, 140, 147, 25, + /* 1920 */ 146, 202, 248, 250, 249, 247, 13, 250, 194, 194, + /* 1930 */ 153, 154, 155, 156, 157, 6, 303, 100, 192, 192, + /* 1940 */ 246, 213, 192, 106, 107, 207, 213, 207, 222, 213, + /* 1950 */ 213, 114, 222, 116, 117, 118, 214, 214, 121, 4, + /* 1960 */ 207, 213, 3, 22, 303, 15, 163, 16, 23, 23, + /* 1970 */ 139, 151, 130, 25, 20, 142, 24, 16, 144, 1, + /* 1980 */ 142, 130, 130, 61, 37, 53, 300, 151, 53, 53, + /* 1990 */ 153, 154, 155, 156, 157, 53, 130, 116, 34, 1, + /* 2000 */ 141, 5, 22, 115, 161, 68, 25, 68, 75, 41, + /* 2010 */ 141, 115, 24, 20, 19, 131, 125, 23, 28, 22, + /* 2020 */ 67, 22, 22, 22, 67, 59, 24, 96, 22, 67, + /* 2030 */ 23, 149, 22, 25, 23, 23, 23, 22, 34, 141, + /* 2040 */ 37, 97, 23, 23, 116, 22, 143, 25, 34, 75, + /* 2050 */ 34, 34, 34, 88, 75, 34, 86, 23, 22, 34, + /* 2060 */ 93, 24, 34, 25, 25, 142, 142, 23, 44, 23, + /* 2070 */ 23, 23, 23, 11, 23, 25, 22, 22, 22, 141, + /* 2080 */ 23, 23, 22, 22, 25, 15, 1, 23, 25, 1, + /* 2090 */ 141, 135, 319, 319, 319, 319, 319, 319, 319, 141, /* 2100 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2110 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2120 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, @@ -169912,176 +177642,177 @@ static const YYCODETYPE yy_lookahead[] = { /* 2250 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2260 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, /* 2270 */ 319, 319, 319, 319, 319, 319, 319, 319, 319, 319, - /* 2280 */ 319, 319, 319, + /* 2280 */ 319, 319, 319, 319, 319, }; -#define YY_SHIFT_COUNT (575) +#define YY_SHIFT_COUNT (578) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (2074) +#define YY_SHIFT_MAX (2088) static const unsigned short int yy_shift_ofst[] = { /* 0 */ 1648, 1477, 1272, 322, 322, 1, 1319, 1478, 1491, 1837, /* 10 */ 1837, 1837, 471, 0, 0, 214, 1093, 1837, 1837, 1837, /* 20 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 30 */ 271, 271, 1219, 1219, 216, 88, 1, 1, 1, 1, - /* 40 */ 1, 40, 111, 258, 361, 469, 512, 583, 622, 693, - /* 50 */ 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, 1093, + /* 30 */ 1837, 271, 271, 1219, 1219, 216, 88, 1, 1, 1, + /* 40 */ 1, 1, 40, 111, 258, 361, 469, 512, 583, 622, + /* 50 */ 693, 732, 803, 842, 913, 1073, 1093, 1093, 1093, 1093, /* 60 */ 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, 1093, - /* 70 */ 1093, 1093, 1093, 1113, 1093, 1216, 957, 957, 1635, 1662, - /* 80 */ 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, + /* 70 */ 1093, 1093, 1093, 
1093, 1113, 1093, 1216, 957, 957, 1635, + /* 80 */ 1662, 1777, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 90 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 100 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 110 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, /* 120 */ 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, 1837, - /* 130 */ 137, 181, 181, 181, 181, 181, 181, 181, 94, 430, - /* 140 */ 66, 65, 112, 366, 533, 533, 740, 1261, 533, 533, - /* 150 */ 79, 79, 533, 412, 412, 412, 77, 412, 123, 113, - /* 160 */ 113, 22, 22, 2098, 2098, 328, 328, 328, 239, 468, - /* 170 */ 468, 468, 468, 1015, 1015, 409, 366, 1129, 1186, 533, - /* 180 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 969, - /* 200 */ 621, 621, 533, 642, 788, 788, 1228, 1228, 822, 822, - /* 210 */ 67, 1274, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 1307, - /* 220 */ 954, 954, 585, 472, 640, 387, 695, 538, 541, 700, - /* 230 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 240 */ 222, 533, 533, 533, 533, 533, 533, 533, 533, 533, - /* 250 */ 533, 533, 533, 1179, 1179, 1179, 533, 533, 533, 565, - /* 260 */ 533, 533, 533, 916, 1144, 533, 533, 1288, 533, 533, - /* 270 */ 533, 533, 533, 533, 533, 533, 639, 1330, 209, 1076, - /* 280 */ 1076, 1076, 1076, 580, 209, 209, 1313, 768, 917, 649, - /* 290 */ 1181, 1316, 405, 1316, 1238, 249, 1181, 1181, 249, 1181, - /* 300 */ 405, 1238, 1369, 464, 1259, 1012, 1012, 1012, 1368, 1368, - /* 310 */ 1368, 1368, 184, 184, 1326, 904, 1287, 1480, 1712, 1712, - /* 320 */ 1633, 1633, 1757, 1757, 1633, 1647, 1651, 1783, 1764, 1791, - /* 330 */ 1791, 1791, 1791, 1633, 1806, 1677, 1651, 1651, 1677, 1783, - /* 340 */ 1764, 1677, 1764, 1677, 1633, 1806, 1674, 1779, 1633, 1806, - /* 350 */ 1823, 1633, 1806, 1633, 1806, 1823, 1732, 1732, 1732, 1794, - /* 360 */ 1840, 1840, 1823, 1732, 1738, 1732, 1794, 1732, 1732, 1701, - /* 370 */ 1844, 1758, 1758, 1823, 1633, 1789, 1789, 1807, 1807, 1742, - /* 380 */ 1752, 1877, 1633, 1743, 1742, 1759, 1765, 1677, 1879, 1897, - /* 390 */ 1897, 1914, 1914, 1914, 2098, 2098, 2098, 2098, 2098, 2098, - /* 400 */ 2098, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 2098, 207, - /* 410 */ 1095, 331, 620, 903, 806, 1074, 1483, 1432, 1481, 1322, - /* 420 */ 1370, 1394, 1515, 1291, 1546, 1547, 1557, 1595, 1598, 1599, - /* 430 */ 1434, 1453, 1618, 1462, 1567, 1489, 1644, 1654, 1616, 1660, - /* 440 */ 1548, 1549, 1682, 1685, 1597, 742, 1941, 1945, 1927, 1787, - /* 450 */ 1937, 1940, 1934, 1936, 1821, 1810, 1832, 1938, 1938, 1942, - /* 460 */ 1822, 1947, 1824, 1949, 1968, 1828, 1841, 1938, 1842, 1912, - /* 470 */ 1939, 1938, 1826, 1921, 1922, 1925, 1926, 1850, 1865, 1948, - /* 480 */ 1843, 1982, 1980, 1964, 1872, 1827, 1928, 1970, 1929, 1923, - /* 490 */ 1958, 1848, 1885, 1977, 1983, 1985, 1871, 1880, 1984, 1943, - /* 500 */ 1986, 1987, 1988, 1990, 1946, 1955, 1991, 1911, 1989, 1994, - /* 510 */ 1951, 1992, 1996, 1873, 1998, 2000, 2001, 2002, 2003, 2004, - /* 520 */ 1999, 1933, 1890, 2009, 2010, 1910, 2005, 2012, 1892, 2011, - /* 530 */ 2006, 2007, 2008, 2013, 1950, 1962, 1957, 2014, 1969, 1952, - /* 540 */ 2015, 2023, 2026, 2027, 2025, 2028, 2018, 1913, 1915, 2031, - /* 550 */ 2011, 2033, 2036, 2037, 2038, 2039, 2040, 2043, 2051, 2044, - /* 560 */ 2045, 2046, 2047, 2049, 2050, 2048, 1944, 1935, 1953, 1954, - /* 570 */ 1956, 2052, 2055, 2053, 2073, 2074, + /* 130 */ 1837, 137, 181, 181, 181, 181, 181, 181, 181, 94, + /* 140 */ 430, 66, 65, 112, 366, 533, 
533, 740, 1257, 533, + /* 150 */ 533, 79, 79, 533, 412, 412, 412, 77, 412, 123, + /* 160 */ 113, 113, 113, 22, 22, 2100, 2100, 328, 328, 328, + /* 170 */ 239, 468, 468, 468, 468, 1015, 1015, 409, 366, 1187, + /* 180 */ 1232, 533, 533, 533, 533, 533, 533, 533, 533, 533, + /* 190 */ 533, 533, 533, 533, 533, 533, 533, 533, 533, 533, + /* 200 */ 533, 969, 621, 621, 533, 642, 788, 788, 1133, 1133, + /* 210 */ 822, 822, 67, 1193, 2100, 2100, 2100, 2100, 2100, 2100, + /* 220 */ 2100, 1307, 954, 954, 585, 472, 640, 387, 695, 538, + /* 230 */ 541, 700, 533, 533, 533, 533, 533, 533, 533, 533, + /* 240 */ 533, 533, 222, 533, 533, 533, 533, 533, 533, 533, + /* 250 */ 533, 533, 533, 533, 533, 1213, 1213, 1213, 533, 533, + /* 260 */ 533, 565, 533, 533, 533, 916, 1147, 533, 533, 1288, + /* 270 */ 533, 533, 533, 533, 533, 533, 533, 533, 639, 1280, + /* 280 */ 209, 1129, 1129, 1129, 1129, 580, 209, 209, 1209, 768, + /* 290 */ 917, 649, 1315, 1334, 405, 1334, 1383, 249, 1315, 1315, + /* 300 */ 249, 1315, 405, 1383, 1441, 464, 1245, 1417, 1417, 1417, + /* 310 */ 1323, 1323, 1323, 1323, 184, 184, 1335, 1476, 856, 1482, + /* 320 */ 1744, 1744, 1665, 1665, 1773, 1773, 1665, 1669, 1671, 1802, + /* 330 */ 1782, 1809, 1809, 1809, 1809, 1665, 1817, 1690, 1671, 1671, + /* 340 */ 1690, 1802, 1782, 1690, 1782, 1690, 1665, 1817, 1693, 1791, + /* 350 */ 1665, 1817, 1832, 1665, 1817, 1665, 1817, 1832, 1752, 1752, + /* 360 */ 1752, 1805, 1850, 1850, 1832, 1752, 1749, 1752, 1805, 1752, + /* 370 */ 1752, 1713, 1858, 1775, 1775, 1832, 1665, 1799, 1799, 1823, + /* 380 */ 1823, 1761, 1765, 1890, 1665, 1757, 1761, 1771, 1774, 1690, + /* 390 */ 1894, 1913, 1913, 1929, 1929, 1929, 2100, 2100, 2100, 2100, + /* 400 */ 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, 2100, + /* 410 */ 2100, 207, 1220, 331, 620, 967, 806, 1074, 1499, 1432, + /* 420 */ 1463, 1479, 1419, 1422, 1557, 1512, 1598, 1599, 1644, 1645, + /* 430 */ 1654, 1660, 1555, 1505, 1684, 1462, 1670, 1563, 1619, 1593, + /* 440 */ 1676, 1679, 1613, 1680, 1554, 1558, 1689, 1692, 1605, 1589, + /* 450 */ 1955, 1959, 1941, 1803, 1950, 1951, 1945, 1946, 1831, 1820, + /* 460 */ 1842, 1948, 1948, 1952, 1833, 1954, 1834, 1961, 1978, 1838, + /* 470 */ 1851, 1948, 1852, 1922, 1947, 1948, 1836, 1932, 1935, 1936, + /* 480 */ 1942, 1866, 1881, 1964, 1859, 1998, 1996, 1980, 1888, 1843, + /* 490 */ 1937, 1981, 1939, 1933, 1968, 1869, 1896, 1988, 1993, 1995, + /* 500 */ 1884, 1891, 1997, 1953, 1999, 2000, 1994, 2001, 1957, 1966, + /* 510 */ 2002, 1931, 1990, 2006, 1962, 2003, 2007, 2004, 1882, 2010, + /* 520 */ 2011, 2012, 2008, 2013, 2015, 1944, 1898, 2019, 2020, 1928, + /* 530 */ 2014, 2023, 1903, 2022, 2016, 2017, 2018, 2021, 1965, 1974, + /* 540 */ 1970, 2024, 1979, 1967, 2025, 2034, 2036, 2037, 2038, 2039, + /* 550 */ 2028, 1923, 1924, 2044, 2022, 2046, 2047, 2048, 2049, 2050, + /* 560 */ 2051, 2054, 2062, 2055, 2056, 2057, 2058, 2060, 2061, 2059, + /* 570 */ 1956, 1938, 1949, 1958, 2063, 2064, 2070, 2085, 2088, }; -#define YY_REDUCE_COUNT (408) +#define YY_REDUCE_COUNT (410) #define YY_REDUCE_MIN (-271) -#define YY_REDUCE_MAX (1740) +#define YY_REDUCE_MAX (1753) static const short yy_reduce_ofst[] = { /* 0 */ -125, 733, 789, 241, 293, -123, -193, -191, -183, -187, /* 10 */ 166, 238, 133, -207, -199, -267, -176, -6, 204, 489, - /* 20 */ 576, -175, 598, 686, 615, 725, 860, 778, 781, 857, - /* 30 */ 616, 887, 87, 240, -192, 408, 626, 796, 843, 854, - /* 40 */ 1003, -271, -271, -271, -271, -271, -271, -271, -271, -271, + /* 20 */ 576, 598, -175, 686, 860, 615, 725, 1014, 778, 781, 
+ /* 30 */ 857, 616, 887, 87, 240, -192, 408, 626, 796, 843, + /* 40 */ 854, 1004, -271, -271, -271, -271, -271, -271, -271, -271, /* 50 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, /* 60 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, 80, 83, - /* 80 */ 313, 886, 888, 996, 1034, 1059, 1081, 1100, 1117, 1152, - /* 90 */ 1155, 1163, 1165, 1167, 1169, 1172, 1180, 1182, 1184, 1198, - /* 100 */ 1200, 1213, 1215, 1225, 1227, 1252, 1254, 1264, 1299, 1303, - /* 110 */ 1308, 1312, 1325, 1328, 1337, 1340, 1343, 1371, 1373, 1384, - /* 120 */ 1386, 1411, 1420, 1424, 1426, 1458, 1470, 1473, 1475, 1479, - /* 130 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, -271, - /* 140 */ -271, 138, 459, 396, -158, 470, 302, -212, 521, 201, - /* 150 */ -195, -92, 559, 630, 632, 630, -271, 632, 901, 63, - /* 160 */ 407, -271, -271, -271, -271, 161, 161, 161, 251, 335, - /* 170 */ 847, 960, 980, 537, 588, 618, 628, 688, 688, -166, - /* 180 */ -161, 674, 790, 794, 799, 851, 852, -122, 680, -120, - /* 190 */ 995, 1038, 415, 1051, 893, 798, 962, 400, 1086, 779, - /* 200 */ 923, 924, 263, 1041, 979, 990, 1083, 1097, 1031, 1194, - /* 210 */ 362, 994, 1139, 1005, 1037, 1202, 1205, 1195, 1210, -194, - /* 220 */ 56, 185, -135, 232, 522, 560, 601, 617, 669, 683, - /* 230 */ 711, 856, 908, 941, 1048, 1101, 1147, 1257, 1262, 1265, - /* 240 */ 392, 1292, 1333, 1339, 1342, 1346, 1350, 1359, 1374, 1418, - /* 250 */ 1421, 1436, 1437, 593, 755, 770, 997, 1445, 1459, 1209, - /* 260 */ 1500, 1504, 1516, 1132, 1243, 1518, 1519, 1440, 1520, 560, - /* 270 */ 1522, 1523, 1524, 1526, 1527, 1529, 1382, 1438, 1431, 1468, - /* 280 */ 1469, 1472, 1476, 1209, 1431, 1431, 1485, 1525, 1539, 1435, - /* 290 */ 1463, 1471, 1492, 1487, 1443, 1494, 1474, 1484, 1498, 1486, - /* 300 */ 1502, 1455, 1530, 1531, 1533, 1540, 1542, 1544, 1505, 1506, - /* 310 */ 1507, 1508, 1521, 1528, 1493, 1537, 1532, 1575, 1488, 1496, - /* 320 */ 1584, 1594, 1509, 1510, 1600, 1538, 1534, 1541, 1574, 1577, - /* 330 */ 1583, 1585, 1586, 1612, 1626, 1581, 1556, 1558, 1587, 1559, - /* 340 */ 1601, 1588, 1603, 1592, 1631, 1640, 1550, 1553, 1643, 1645, - /* 350 */ 1625, 1649, 1652, 1650, 1653, 1632, 1636, 1637, 1642, 1634, - /* 360 */ 1639, 1641, 1646, 1656, 1655, 1658, 1659, 1661, 1663, 1560, - /* 370 */ 1564, 1596, 1605, 1664, 1670, 1565, 1571, 1627, 1638, 1657, - /* 380 */ 1665, 1623, 1702, 1630, 1666, 1667, 1671, 1673, 1703, 1718, - /* 390 */ 1719, 1729, 1730, 1731, 1621, 1622, 1628, 1720, 1713, 1716, - /* 400 */ 1722, 1723, 1733, 1717, 1724, 1727, 1728, 1725, 1740, + /* 70 */ -271, -271, -271, -271, -271, -271, -271, -271, -271, 80, + /* 80 */ 83, 313, 886, 888, 918, 938, 1021, 1034, 1036, 1141, + /* 90 */ 1159, 1163, 1166, 1168, 1170, 1176, 1178, 1180, 1184, 1196, + /* 100 */ 1198, 1205, 1215, 1225, 1227, 1236, 1252, 1254, 1264, 1303, + /* 110 */ 1309, 1312, 1322, 1325, 1328, 1337, 1340, 1343, 1353, 1371, + /* 120 */ 1373, 1384, 1386, 1411, 1413, 1420, 1424, 1426, 1458, 1470, + /* 130 */ 1473, -271, -271, -271, -271, -271, -271, -271, -271, -271, + /* 140 */ -271, -271, 138, 459, 396, -158, 470, 302, -212, 521, + /* 150 */ 201, -195, -92, 559, 630, 632, 630, -271, 632, 901, + /* 160 */ 63, 407, 670, -271, -271, -271, -271, 161, 161, 161, + /* 170 */ 251, 335, 847, 979, 1097, 537, 588, 618, 628, 688, + /* 180 */ 688, -166, -161, 674, 787, 794, 799, 852, 996, -122, + /* 190 */ 837, -120, 1018, 1035, 415, 1047, 1001, 958, 1082, 400, + /* 200 */ 1099, 779, 1137, 1142, 263, 
1083, 1145, 1150, 1041, 1139, + /* 210 */ 965, 1050, 362, 849, 752, 629, 675, 1162, 1173, 1090, + /* 220 */ 1195, -194, 56, 185, -135, 232, 522, 560, 571, 601, + /* 230 */ 617, 669, 683, 711, 850, 893, 1000, 1040, 1049, 1081, + /* 240 */ 1087, 1101, 392, 1114, 1123, 1155, 1161, 1175, 1271, 1293, + /* 250 */ 1299, 1330, 1339, 1342, 1347, 593, 1282, 1286, 1350, 1359, + /* 260 */ 1368, 1314, 1480, 1483, 1507, 1085, 1338, 1526, 1527, 1487, + /* 270 */ 1531, 560, 1532, 1534, 1535, 1538, 1539, 1541, 1448, 1450, + /* 280 */ 1496, 1484, 1485, 1489, 1490, 1314, 1496, 1496, 1504, 1536, + /* 290 */ 1564, 1451, 1486, 1492, 1509, 1493, 1465, 1515, 1494, 1495, + /* 300 */ 1517, 1500, 1519, 1474, 1550, 1543, 1548, 1556, 1565, 1566, + /* 310 */ 1518, 1523, 1542, 1544, 1525, 1545, 1513, 1553, 1552, 1604, + /* 320 */ 1508, 1510, 1608, 1609, 1520, 1528, 1612, 1540, 1559, 1560, + /* 330 */ 1592, 1591, 1595, 1596, 1597, 1629, 1638, 1594, 1569, 1570, + /* 340 */ 1600, 1568, 1610, 1601, 1611, 1603, 1643, 1651, 1562, 1571, + /* 350 */ 1655, 1659, 1640, 1663, 1666, 1664, 1667, 1641, 1650, 1652, + /* 360 */ 1653, 1647, 1656, 1657, 1658, 1668, 1672, 1681, 1649, 1682, + /* 370 */ 1683, 1573, 1582, 1607, 1615, 1685, 1702, 1586, 1587, 1642, + /* 380 */ 1646, 1673, 1675, 1636, 1714, 1637, 1677, 1674, 1678, 1694, + /* 390 */ 1719, 1734, 1735, 1746, 1747, 1750, 1633, 1661, 1686, 1738, + /* 400 */ 1728, 1733, 1736, 1737, 1740, 1726, 1730, 1742, 1743, 1748, + /* 410 */ 1753, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 1647, 1647, 1647, 1475, 1240, 1351, 1240, 1240, 1240, 1475, - /* 10 */ 1475, 1475, 1240, 1381, 1381, 1528, 1273, 1240, 1240, 1240, - /* 20 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1474, 1240, 1240, - /* 30 */ 1240, 1240, 1563, 1563, 1240, 1240, 1240, 1240, 1240, 1240, - /* 40 */ 1240, 1240, 1390, 1240, 1397, 1240, 1240, 1240, 1240, 1240, - /* 50 */ 1476, 1477, 1240, 1240, 1240, 1527, 1529, 1492, 1404, 1403, - /* 60 */ 1402, 1401, 1510, 1369, 1395, 1388, 1392, 1470, 1471, 1469, - /* 70 */ 1473, 1477, 1476, 1240, 1391, 1438, 1454, 1437, 1240, 1240, - /* 80 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 90 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 100 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 110 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 120 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 130 */ 1446, 1453, 1452, 1451, 1460, 1450, 1447, 1440, 1439, 1441, - /* 140 */ 1442, 1240, 1240, 1264, 1240, 1240, 1261, 1315, 1240, 1240, - /* 150 */ 1240, 1240, 1240, 1547, 1546, 1240, 1443, 1240, 1273, 1432, - /* 160 */ 1431, 1457, 1444, 1456, 1455, 1535, 1599, 1598, 1493, 1240, - /* 170 */ 1240, 1240, 1240, 1240, 1240, 1563, 1240, 1240, 1240, 1240, - /* 180 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 190 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1371, - /* 200 */ 1563, 1563, 1240, 1273, 1563, 1563, 1372, 1372, 1269, 1269, - /* 210 */ 1375, 1240, 1542, 1342, 1342, 1342, 1342, 1351, 1342, 1240, - /* 220 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 230 */ 1240, 1240, 1240, 1240, 1532, 1530, 1240, 1240, 1240, 1240, - /* 240 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 250 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 260 */ 1240, 1240, 1240, 1347, 1240, 1240, 1240, 1240, 1240, 1240, - /* 270 */ 1240, 1240, 1240, 1240, 1240, 1592, 1240, 1505, 1329, 1347, - /* 280 */ 1347, 1347, 1347, 1349, 
1330, 1328, 1341, 1274, 1247, 1639, - /* 290 */ 1407, 1396, 1348, 1396, 1636, 1394, 1407, 1407, 1394, 1407, - /* 300 */ 1348, 1636, 1290, 1615, 1285, 1381, 1381, 1381, 1371, 1371, - /* 310 */ 1371, 1371, 1375, 1375, 1472, 1348, 1341, 1240, 1639, 1639, - /* 320 */ 1357, 1357, 1638, 1638, 1357, 1493, 1623, 1416, 1318, 1324, - /* 330 */ 1324, 1324, 1324, 1357, 1258, 1394, 1623, 1623, 1394, 1416, - /* 340 */ 1318, 1394, 1318, 1394, 1357, 1258, 1509, 1633, 1357, 1258, - /* 350 */ 1483, 1357, 1258, 1357, 1258, 1483, 1316, 1316, 1316, 1305, - /* 360 */ 1240, 1240, 1483, 1316, 1290, 1316, 1305, 1316, 1316, 1581, - /* 370 */ 1240, 1487, 1487, 1483, 1357, 1573, 1573, 1384, 1384, 1389, - /* 380 */ 1375, 1478, 1357, 1240, 1389, 1387, 1385, 1394, 1308, 1595, - /* 390 */ 1595, 1591, 1591, 1591, 1644, 1644, 1542, 1608, 1273, 1273, - /* 400 */ 1273, 1273, 1608, 1292, 1292, 1274, 1274, 1273, 1608, 1240, - /* 410 */ 1240, 1240, 1240, 1240, 1240, 1603, 1240, 1537, 1494, 1361, - /* 420 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 430 */ 1240, 1240, 1240, 1240, 1548, 1240, 1240, 1240, 1240, 1240, - /* 440 */ 1240, 1240, 1240, 1240, 1240, 1421, 1240, 1243, 1539, 1240, - /* 450 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1398, 1399, 1362, - /* 460 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1413, 1240, 1240, - /* 470 */ 1240, 1408, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 480 */ 1635, 1240, 1240, 1240, 1240, 1240, 1240, 1508, 1507, 1240, - /* 490 */ 1240, 1359, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 500 */ 1240, 1240, 1240, 1240, 1240, 1288, 1240, 1240, 1240, 1240, - /* 510 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 520 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1386, - /* 530 */ 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 540 */ 1240, 1240, 1240, 1240, 1578, 1376, 1240, 1240, 1240, 1240, - /* 550 */ 1626, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, 1240, - /* 560 */ 1240, 1240, 1240, 1240, 1240, 1619, 1332, 1423, 1240, 1422, - /* 570 */ 1426, 1262, 1240, 1252, 1240, 1240, + /* 0 */ 1648, 1648, 1648, 1478, 1243, 1354, 1243, 1243, 1243, 1478, + /* 10 */ 1478, 1478, 1243, 1384, 1384, 1531, 1276, 1243, 1243, 1243, + /* 20 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1477, 1243, + /* 30 */ 1243, 1243, 1243, 1564, 1564, 1243, 1243, 1243, 1243, 1243, + /* 40 */ 1243, 1243, 1243, 1393, 1243, 1400, 1243, 1243, 1243, 1243, + /* 50 */ 1243, 1479, 1480, 1243, 1243, 1243, 1530, 1532, 1495, 1407, + /* 60 */ 1406, 1405, 1404, 1513, 1372, 1398, 1391, 1395, 1474, 1475, + /* 70 */ 1473, 1626, 1480, 1479, 1243, 1394, 1442, 1458, 1441, 1243, + /* 80 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 90 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 100 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 110 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 120 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 130 */ 1243, 1450, 1457, 1456, 1455, 1464, 1454, 1451, 1444, 1443, + /* 140 */ 1445, 1446, 1243, 1243, 1267, 1243, 1243, 1264, 1318, 1243, + /* 150 */ 1243, 1243, 1243, 1243, 1550, 1549, 1243, 1447, 1243, 1276, + /* 160 */ 1435, 1434, 1433, 1461, 1448, 1460, 1459, 1538, 1600, 1599, + /* 170 */ 1496, 1243, 1243, 1243, 1243, 1243, 1243, 1564, 1243, 1243, + /* 180 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 190 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 200 */ 1243, 1374, 
1564, 1564, 1243, 1276, 1564, 1564, 1375, 1375, + /* 210 */ 1272, 1272, 1378, 1243, 1545, 1345, 1345, 1345, 1345, 1354, + /* 220 */ 1345, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 230 */ 1243, 1243, 1243, 1243, 1243, 1243, 1535, 1533, 1243, 1243, + /* 240 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 250 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 260 */ 1243, 1243, 1243, 1243, 1243, 1350, 1243, 1243, 1243, 1243, + /* 270 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1593, 1243, 1508, + /* 280 */ 1332, 1350, 1350, 1350, 1350, 1352, 1333, 1331, 1344, 1277, + /* 290 */ 1250, 1640, 1410, 1399, 1351, 1399, 1637, 1397, 1410, 1410, + /* 300 */ 1397, 1410, 1351, 1637, 1293, 1615, 1288, 1384, 1384, 1384, + /* 310 */ 1374, 1374, 1374, 1374, 1378, 1378, 1476, 1351, 1344, 1243, + /* 320 */ 1640, 1640, 1360, 1360, 1639, 1639, 1360, 1496, 1623, 1419, + /* 330 */ 1321, 1327, 1327, 1327, 1327, 1360, 1261, 1397, 1623, 1623, + /* 340 */ 1397, 1419, 1321, 1397, 1321, 1397, 1360, 1261, 1512, 1634, + /* 350 */ 1360, 1261, 1486, 1360, 1261, 1360, 1261, 1486, 1319, 1319, + /* 360 */ 1319, 1308, 1243, 1243, 1486, 1319, 1293, 1319, 1308, 1319, + /* 370 */ 1319, 1582, 1243, 1490, 1490, 1486, 1360, 1574, 1574, 1387, + /* 380 */ 1387, 1392, 1378, 1481, 1360, 1243, 1392, 1390, 1388, 1397, + /* 390 */ 1311, 1596, 1596, 1592, 1592, 1592, 1645, 1645, 1545, 1608, + /* 400 */ 1276, 1276, 1276, 1276, 1608, 1295, 1295, 1277, 1277, 1276, + /* 410 */ 1608, 1243, 1243, 1243, 1243, 1243, 1243, 1603, 1243, 1540, + /* 420 */ 1497, 1364, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 430 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1551, 1243, + /* 440 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1424, + /* 450 */ 1243, 1246, 1542, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 460 */ 1243, 1401, 1402, 1365, 1243, 1243, 1243, 1243, 1243, 1243, + /* 470 */ 1243, 1416, 1243, 1243, 1243, 1411, 1243, 1243, 1243, 1243, + /* 480 */ 1243, 1243, 1243, 1243, 1636, 1243, 1243, 1243, 1243, 1243, + /* 490 */ 1243, 1511, 1510, 1243, 1243, 1362, 1243, 1243, 1243, 1243, + /* 500 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1291, + /* 510 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 520 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, + /* 530 */ 1243, 1243, 1243, 1389, 1243, 1243, 1243, 1243, 1243, 1243, + /* 540 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1579, 1379, + /* 550 */ 1243, 1243, 1243, 1243, 1627, 1243, 1243, 1243, 1243, 1243, + /* 560 */ 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1243, 1619, + /* 570 */ 1335, 1425, 1243, 1428, 1265, 1243, 1255, 1243, 1243, }; /********** End of lemon-generated parsing tables *****************************/ @@ -170878,59 +178609,59 @@ static const char *const yyRuleName[] = { /* 175 */ "idlist ::= idlist COMMA nm", /* 176 */ "idlist ::= nm", /* 177 */ "expr ::= LP expr RP", - /* 178 */ "expr ::= ID|INDEXED", - /* 179 */ "expr ::= JOIN_KW", - /* 180 */ "expr ::= nm DOT nm", - /* 181 */ "expr ::= nm DOT nm DOT nm", - /* 182 */ "term ::= NULL|FLOAT|BLOB", - /* 183 */ "term ::= STRING", - /* 184 */ "term ::= INTEGER", - /* 185 */ "expr ::= VARIABLE", - /* 186 */ "expr ::= expr COLLATE ID|STRING", - /* 187 */ "expr ::= CAST LP expr AS typetoken RP", - /* 188 */ "expr ::= ID|INDEXED LP distinct exprlist RP", - /* 189 */ "expr ::= ID|INDEXED LP STAR RP", - /* 190 */ "expr ::= ID|INDEXED LP distinct exprlist RP filter_over", - /* 191 */ "expr ::= ID|INDEXED LP STAR 
RP filter_over", - /* 192 */ "term ::= CTIME_KW", - /* 193 */ "expr ::= LP nexprlist COMMA expr RP", - /* 194 */ "expr ::= expr AND expr", - /* 195 */ "expr ::= expr OR expr", - /* 196 */ "expr ::= expr LT|GT|GE|LE expr", - /* 197 */ "expr ::= expr EQ|NE expr", - /* 198 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", - /* 199 */ "expr ::= expr PLUS|MINUS expr", - /* 200 */ "expr ::= expr STAR|SLASH|REM expr", - /* 201 */ "expr ::= expr CONCAT expr", - /* 202 */ "likeop ::= NOT LIKE_KW|MATCH", - /* 203 */ "expr ::= expr likeop expr", - /* 204 */ "expr ::= expr likeop expr ESCAPE expr", - /* 205 */ "expr ::= expr ISNULL|NOTNULL", - /* 206 */ "expr ::= expr NOT NULL", - /* 207 */ "expr ::= expr IS expr", - /* 208 */ "expr ::= expr IS NOT expr", - /* 209 */ "expr ::= expr IS NOT DISTINCT FROM expr", - /* 210 */ "expr ::= expr IS DISTINCT FROM expr", - /* 211 */ "expr ::= NOT expr", - /* 212 */ "expr ::= BITNOT expr", - /* 213 */ "expr ::= PLUS|MINUS expr", - /* 214 */ "expr ::= expr PTR expr", - /* 215 */ "between_op ::= BETWEEN", - /* 216 */ "between_op ::= NOT BETWEEN", - /* 217 */ "expr ::= expr between_op expr AND expr", - /* 218 */ "in_op ::= IN", - /* 219 */ "in_op ::= NOT IN", - /* 220 */ "expr ::= expr in_op LP exprlist RP", - /* 221 */ "expr ::= LP select RP", - /* 222 */ "expr ::= expr in_op LP select RP", - /* 223 */ "expr ::= expr in_op nm dbnm paren_exprlist", - /* 224 */ "expr ::= EXISTS LP select RP", - /* 225 */ "expr ::= CASE case_operand case_exprlist case_else END", - /* 226 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", - /* 227 */ "case_exprlist ::= WHEN expr THEN expr", - /* 228 */ "case_else ::= ELSE expr", - /* 229 */ "case_else ::=", - /* 230 */ "case_operand ::= expr", + /* 178 */ "expr ::= ID|INDEXED|JOIN_KW", + /* 179 */ "expr ::= nm DOT nm", + /* 180 */ "expr ::= nm DOT nm DOT nm", + /* 181 */ "term ::= NULL|FLOAT|BLOB", + /* 182 */ "term ::= STRING", + /* 183 */ "term ::= INTEGER", + /* 184 */ "expr ::= VARIABLE", + /* 185 */ "expr ::= expr COLLATE ID|STRING", + /* 186 */ "expr ::= CAST LP expr AS typetoken RP", + /* 187 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP", + /* 188 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP", + /* 189 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP", + /* 190 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over", + /* 191 */ "expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over", + /* 192 */ "expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over", + /* 193 */ "term ::= CTIME_KW", + /* 194 */ "expr ::= LP nexprlist COMMA expr RP", + /* 195 */ "expr ::= expr AND expr", + /* 196 */ "expr ::= expr OR expr", + /* 197 */ "expr ::= expr LT|GT|GE|LE expr", + /* 198 */ "expr ::= expr EQ|NE expr", + /* 199 */ "expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr", + /* 200 */ "expr ::= expr PLUS|MINUS expr", + /* 201 */ "expr ::= expr STAR|SLASH|REM expr", + /* 202 */ "expr ::= expr CONCAT expr", + /* 203 */ "likeop ::= NOT LIKE_KW|MATCH", + /* 204 */ "expr ::= expr likeop expr", + /* 205 */ "expr ::= expr likeop expr ESCAPE expr", + /* 206 */ "expr ::= expr ISNULL|NOTNULL", + /* 207 */ "expr ::= expr NOT NULL", + /* 208 */ "expr ::= expr IS expr", + /* 209 */ "expr ::= expr IS NOT expr", + /* 210 */ "expr ::= expr IS NOT DISTINCT FROM expr", + /* 211 */ "expr ::= expr IS DISTINCT FROM expr", + /* 212 */ "expr ::= NOT expr", + /* 213 */ "expr ::= BITNOT expr", + /* 214 */ "expr ::= PLUS|MINUS expr", + /* 215 */ "expr ::= expr PTR expr", + /* 216 */ "between_op 
::= BETWEEN", + /* 217 */ "between_op ::= NOT BETWEEN", + /* 218 */ "expr ::= expr between_op expr AND expr", + /* 219 */ "in_op ::= IN", + /* 220 */ "in_op ::= NOT IN", + /* 221 */ "expr ::= expr in_op LP exprlist RP", + /* 222 */ "expr ::= LP select RP", + /* 223 */ "expr ::= expr in_op LP select RP", + /* 224 */ "expr ::= expr in_op nm dbnm paren_exprlist", + /* 225 */ "expr ::= EXISTS LP select RP", + /* 226 */ "expr ::= CASE case_operand case_exprlist case_else END", + /* 227 */ "case_exprlist ::= case_exprlist WHEN expr THEN expr", + /* 228 */ "case_exprlist ::= WHEN expr THEN expr", + /* 229 */ "case_else ::= ELSE expr", + /* 230 */ "case_else ::=", /* 231 */ "case_operand ::=", /* 232 */ "exprlist ::=", /* 233 */ "nexprlist ::= nexprlist COMMA expr", @@ -171011,100 +178742,100 @@ static const char *const yyRuleName[] = { /* 308 */ "wqitem ::= nm eidlist_opt wqas LP select RP", /* 309 */ "wqlist ::= wqitem", /* 310 */ "wqlist ::= wqlist COMMA wqitem", - /* 311 */ "windowdefn_list ::= windowdefn", - /* 312 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", - /* 313 */ "windowdefn ::= nm AS LP window RP", - /* 314 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", - /* 315 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", - /* 316 */ "window ::= ORDER BY sortlist frame_opt", - /* 317 */ "window ::= nm ORDER BY sortlist frame_opt", - /* 318 */ "window ::= frame_opt", - /* 319 */ "window ::= nm frame_opt", - /* 320 */ "frame_opt ::=", - /* 321 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", - /* 322 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", - /* 323 */ "range_or_rows ::= RANGE|ROWS|GROUPS", - /* 324 */ "frame_bound_s ::= frame_bound", - /* 325 */ "frame_bound_s ::= UNBOUNDED PRECEDING", - /* 326 */ "frame_bound_e ::= frame_bound", - /* 327 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", - /* 328 */ "frame_bound ::= expr PRECEDING|FOLLOWING", - /* 329 */ "frame_bound ::= CURRENT ROW", - /* 330 */ "frame_exclude_opt ::=", - /* 331 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", - /* 332 */ "frame_exclude ::= NO OTHERS", - /* 333 */ "frame_exclude ::= CURRENT ROW", - /* 334 */ "frame_exclude ::= GROUP|TIES", - /* 335 */ "window_clause ::= WINDOW windowdefn_list", - /* 336 */ "filter_over ::= filter_clause over_clause", - /* 337 */ "filter_over ::= over_clause", - /* 338 */ "filter_over ::= filter_clause", - /* 339 */ "over_clause ::= OVER LP window RP", - /* 340 */ "over_clause ::= OVER nm", - /* 341 */ "filter_clause ::= FILTER LP WHERE expr RP", - /* 342 */ "input ::= cmdlist", - /* 343 */ "cmdlist ::= cmdlist ecmd", - /* 344 */ "cmdlist ::= ecmd", - /* 345 */ "ecmd ::= SEMI", - /* 346 */ "ecmd ::= cmdx SEMI", - /* 347 */ "ecmd ::= explain cmdx SEMI", - /* 348 */ "trans_opt ::=", - /* 349 */ "trans_opt ::= TRANSACTION", - /* 350 */ "trans_opt ::= TRANSACTION nm", - /* 351 */ "savepoint_opt ::= SAVEPOINT", - /* 352 */ "savepoint_opt ::=", - /* 353 */ "cmd ::= create_table create_table_args", - /* 354 */ "table_option_set ::= table_option", - /* 355 */ "columnlist ::= columnlist COMMA columnname carglist", - /* 356 */ "columnlist ::= columnname carglist", - /* 357 */ "nm ::= ID|INDEXED", - /* 358 */ "nm ::= STRING", - /* 359 */ "nm ::= JOIN_KW", - /* 360 */ "typetoken ::= typename", - /* 361 */ "typename ::= ID|STRING", - /* 362 */ "signed ::= plus_num", - /* 363 */ "signed ::= minus_num", - /* 364 */ "carglist ::= carglist ccons", - /* 365 */ "carglist ::=", - /* 366 */ "ccons ::= NULL 
onconf", - /* 367 */ "ccons ::= GENERATED ALWAYS AS generated", - /* 368 */ "ccons ::= AS generated", - /* 369 */ "conslist_opt ::= COMMA conslist", - /* 370 */ "conslist ::= conslist tconscomma tcons", - /* 371 */ "conslist ::= tcons", - /* 372 */ "tconscomma ::=", - /* 373 */ "defer_subclause_opt ::= defer_subclause", - /* 374 */ "resolvetype ::= raisetype", - /* 375 */ "selectnowith ::= oneselect", - /* 376 */ "oneselect ::= values", - /* 377 */ "sclp ::= selcollist COMMA", - /* 378 */ "as ::= ID|STRING", - /* 379 */ "indexed_opt ::= indexed_by", - /* 380 */ "returning ::=", - /* 381 */ "expr ::= term", - /* 382 */ "likeop ::= LIKE_KW|MATCH", - /* 383 */ "exprlist ::= nexprlist", - /* 384 */ "nmnum ::= plus_num", - /* 385 */ "nmnum ::= nm", - /* 386 */ "nmnum ::= ON", - /* 387 */ "nmnum ::= DELETE", - /* 388 */ "nmnum ::= DEFAULT", - /* 389 */ "plus_num ::= INTEGER|FLOAT", - /* 390 */ "foreach_clause ::=", - /* 391 */ "foreach_clause ::= FOR EACH ROW", - /* 392 */ "trnm ::= nm", - /* 393 */ "tridxby ::=", - /* 394 */ "database_kw_opt ::= DATABASE", - /* 395 */ "database_kw_opt ::=", - /* 396 */ "kwcolumn_opt ::=", - /* 397 */ "kwcolumn_opt ::= COLUMNKW", - /* 398 */ "vtabarglist ::= vtabarg", - /* 399 */ "vtabarglist ::= vtabarglist COMMA vtabarg", - /* 400 */ "vtabarg ::= vtabarg vtabargtoken", - /* 401 */ "anylist ::=", - /* 402 */ "anylist ::= anylist LP anylist RP", - /* 403 */ "anylist ::= anylist ANY", - /* 404 */ "with ::=", + /* 311 */ "windowdefn_list ::= windowdefn_list COMMA windowdefn", + /* 312 */ "windowdefn ::= nm AS LP window RP", + /* 313 */ "window ::= PARTITION BY nexprlist orderby_opt frame_opt", + /* 314 */ "window ::= nm PARTITION BY nexprlist orderby_opt frame_opt", + /* 315 */ "window ::= ORDER BY sortlist frame_opt", + /* 316 */ "window ::= nm ORDER BY sortlist frame_opt", + /* 317 */ "window ::= nm frame_opt", + /* 318 */ "frame_opt ::=", + /* 319 */ "frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt", + /* 320 */ "frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt", + /* 321 */ "range_or_rows ::= RANGE|ROWS|GROUPS", + /* 322 */ "frame_bound_s ::= frame_bound", + /* 323 */ "frame_bound_s ::= UNBOUNDED PRECEDING", + /* 324 */ "frame_bound_e ::= frame_bound", + /* 325 */ "frame_bound_e ::= UNBOUNDED FOLLOWING", + /* 326 */ "frame_bound ::= expr PRECEDING|FOLLOWING", + /* 327 */ "frame_bound ::= CURRENT ROW", + /* 328 */ "frame_exclude_opt ::=", + /* 329 */ "frame_exclude_opt ::= EXCLUDE frame_exclude", + /* 330 */ "frame_exclude ::= NO OTHERS", + /* 331 */ "frame_exclude ::= CURRENT ROW", + /* 332 */ "frame_exclude ::= GROUP|TIES", + /* 333 */ "window_clause ::= WINDOW windowdefn_list", + /* 334 */ "filter_over ::= filter_clause over_clause", + /* 335 */ "filter_over ::= over_clause", + /* 336 */ "filter_over ::= filter_clause", + /* 337 */ "over_clause ::= OVER LP window RP", + /* 338 */ "over_clause ::= OVER nm", + /* 339 */ "filter_clause ::= FILTER LP WHERE expr RP", + /* 340 */ "input ::= cmdlist", + /* 341 */ "cmdlist ::= cmdlist ecmd", + /* 342 */ "cmdlist ::= ecmd", + /* 343 */ "ecmd ::= SEMI", + /* 344 */ "ecmd ::= cmdx SEMI", + /* 345 */ "ecmd ::= explain cmdx SEMI", + /* 346 */ "trans_opt ::=", + /* 347 */ "trans_opt ::= TRANSACTION", + /* 348 */ "trans_opt ::= TRANSACTION nm", + /* 349 */ "savepoint_opt ::= SAVEPOINT", + /* 350 */ "savepoint_opt ::=", + /* 351 */ "cmd ::= create_table create_table_args", + /* 352 */ "table_option_set ::= table_option", + /* 353 */ "columnlist ::= columnlist COMMA columnname 
carglist", + /* 354 */ "columnlist ::= columnname carglist", + /* 355 */ "nm ::= ID|INDEXED|JOIN_KW", + /* 356 */ "nm ::= STRING", + /* 357 */ "typetoken ::= typename", + /* 358 */ "typename ::= ID|STRING", + /* 359 */ "signed ::= plus_num", + /* 360 */ "signed ::= minus_num", + /* 361 */ "carglist ::= carglist ccons", + /* 362 */ "carglist ::=", + /* 363 */ "ccons ::= NULL onconf", + /* 364 */ "ccons ::= GENERATED ALWAYS AS generated", + /* 365 */ "ccons ::= AS generated", + /* 366 */ "conslist_opt ::= COMMA conslist", + /* 367 */ "conslist ::= conslist tconscomma tcons", + /* 368 */ "conslist ::= tcons", + /* 369 */ "tconscomma ::=", + /* 370 */ "defer_subclause_opt ::= defer_subclause", + /* 371 */ "resolvetype ::= raisetype", + /* 372 */ "selectnowith ::= oneselect", + /* 373 */ "oneselect ::= values", + /* 374 */ "sclp ::= selcollist COMMA", + /* 375 */ "as ::= ID|STRING", + /* 376 */ "indexed_opt ::= indexed_by", + /* 377 */ "returning ::=", + /* 378 */ "expr ::= term", + /* 379 */ "likeop ::= LIKE_KW|MATCH", + /* 380 */ "case_operand ::= expr", + /* 381 */ "exprlist ::= nexprlist", + /* 382 */ "nmnum ::= plus_num", + /* 383 */ "nmnum ::= nm", + /* 384 */ "nmnum ::= ON", + /* 385 */ "nmnum ::= DELETE", + /* 386 */ "nmnum ::= DEFAULT", + /* 387 */ "plus_num ::= INTEGER|FLOAT", + /* 388 */ "foreach_clause ::=", + /* 389 */ "foreach_clause ::= FOR EACH ROW", + /* 390 */ "trnm ::= nm", + /* 391 */ "tridxby ::=", + /* 392 */ "database_kw_opt ::= DATABASE", + /* 393 */ "database_kw_opt ::=", + /* 394 */ "kwcolumn_opt ::=", + /* 395 */ "kwcolumn_opt ::= COLUMNKW", + /* 396 */ "vtabarglist ::= vtabarg", + /* 397 */ "vtabarglist ::= vtabarglist COMMA vtabarg", + /* 398 */ "vtabarg ::= vtabarg vtabargtoken", + /* 399 */ "anylist ::=", + /* 400 */ "anylist ::= anylist LP anylist RP", + /* 401 */ "anylist ::= anylist ANY", + /* 402 */ "with ::=", + /* 403 */ "windowdefn_list ::= windowdefn", + /* 404 */ "window ::= frame_opt", }; #endif /* NDEBUG */ @@ -171789,59 +179520,59 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 263, /* (175) idlist ::= idlist COMMA nm */ 263, /* (176) idlist ::= nm */ 217, /* (177) expr ::= LP expr RP */ - 217, /* (178) expr ::= ID|INDEXED */ - 217, /* (179) expr ::= JOIN_KW */ - 217, /* (180) expr ::= nm DOT nm */ - 217, /* (181) expr ::= nm DOT nm DOT nm */ - 216, /* (182) term ::= NULL|FLOAT|BLOB */ - 216, /* (183) term ::= STRING */ - 216, /* (184) term ::= INTEGER */ - 217, /* (185) expr ::= VARIABLE */ - 217, /* (186) expr ::= expr COLLATE ID|STRING */ - 217, /* (187) expr ::= CAST LP expr AS typetoken RP */ - 217, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - 217, /* (189) expr ::= ID|INDEXED LP STAR RP */ - 217, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - 217, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - 216, /* (192) term ::= CTIME_KW */ - 217, /* (193) expr ::= LP nexprlist COMMA expr RP */ - 217, /* (194) expr ::= expr AND expr */ - 217, /* (195) expr ::= expr OR expr */ - 217, /* (196) expr ::= expr LT|GT|GE|LE expr */ - 217, /* (197) expr ::= expr EQ|NE expr */ - 217, /* (198) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - 217, /* (199) expr ::= expr PLUS|MINUS expr */ - 217, /* (200) expr ::= expr STAR|SLASH|REM expr */ - 217, /* (201) expr ::= expr CONCAT expr */ - 274, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - 217, /* (203) expr ::= expr likeop expr */ - 217, /* (204) expr ::= expr likeop expr ESCAPE expr */ - 217, /* (205) expr ::= expr ISNULL|NOTNULL */ - 217, /* (206) expr ::= expr NOT NULL */ - 
217, /* (207) expr ::= expr IS expr */ - 217, /* (208) expr ::= expr IS NOT expr */ - 217, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - 217, /* (210) expr ::= expr IS DISTINCT FROM expr */ - 217, /* (211) expr ::= NOT expr */ - 217, /* (212) expr ::= BITNOT expr */ - 217, /* (213) expr ::= PLUS|MINUS expr */ - 217, /* (214) expr ::= expr PTR expr */ - 275, /* (215) between_op ::= BETWEEN */ - 275, /* (216) between_op ::= NOT BETWEEN */ - 217, /* (217) expr ::= expr between_op expr AND expr */ - 276, /* (218) in_op ::= IN */ - 276, /* (219) in_op ::= NOT IN */ - 217, /* (220) expr ::= expr in_op LP exprlist RP */ - 217, /* (221) expr ::= LP select RP */ - 217, /* (222) expr ::= expr in_op LP select RP */ - 217, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - 217, /* (224) expr ::= EXISTS LP select RP */ - 217, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - 279, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - 279, /* (227) case_exprlist ::= WHEN expr THEN expr */ - 280, /* (228) case_else ::= ELSE expr */ - 280, /* (229) case_else ::= */ - 278, /* (230) case_operand ::= expr */ + 217, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + 217, /* (179) expr ::= nm DOT nm */ + 217, /* (180) expr ::= nm DOT nm DOT nm */ + 216, /* (181) term ::= NULL|FLOAT|BLOB */ + 216, /* (182) term ::= STRING */ + 216, /* (183) term ::= INTEGER */ + 217, /* (184) expr ::= VARIABLE */ + 217, /* (185) expr ::= expr COLLATE ID|STRING */ + 217, /* (186) expr ::= CAST LP expr AS typetoken RP */ + 217, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + 217, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + 217, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + 217, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + 217, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + 217, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + 216, /* (193) term ::= CTIME_KW */ + 217, /* (194) expr ::= LP nexprlist COMMA expr RP */ + 217, /* (195) expr ::= expr AND expr */ + 217, /* (196) expr ::= expr OR expr */ + 217, /* (197) expr ::= expr LT|GT|GE|LE expr */ + 217, /* (198) expr ::= expr EQ|NE expr */ + 217, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + 217, /* (200) expr ::= expr PLUS|MINUS expr */ + 217, /* (201) expr ::= expr STAR|SLASH|REM expr */ + 217, /* (202) expr ::= expr CONCAT expr */ + 274, /* (203) likeop ::= NOT LIKE_KW|MATCH */ + 217, /* (204) expr ::= expr likeop expr */ + 217, /* (205) expr ::= expr likeop expr ESCAPE expr */ + 217, /* (206) expr ::= expr ISNULL|NOTNULL */ + 217, /* (207) expr ::= expr NOT NULL */ + 217, /* (208) expr ::= expr IS expr */ + 217, /* (209) expr ::= expr IS NOT expr */ + 217, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ + 217, /* (211) expr ::= expr IS DISTINCT FROM expr */ + 217, /* (212) expr ::= NOT expr */ + 217, /* (213) expr ::= BITNOT expr */ + 217, /* (214) expr ::= PLUS|MINUS expr */ + 217, /* (215) expr ::= expr PTR expr */ + 275, /* (216) between_op ::= BETWEEN */ + 275, /* (217) between_op ::= NOT BETWEEN */ + 217, /* (218) expr ::= expr between_op expr AND expr */ + 276, /* (219) in_op ::= IN */ + 276, /* (220) in_op ::= NOT IN */ + 217, /* (221) expr ::= expr in_op LP exprlist RP */ + 217, /* (222) expr ::= LP select RP */ + 217, /* (223) expr ::= expr in_op LP select RP */ + 217, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ + 217, /* (225) expr ::= EXISTS LP 
select RP */ + 217, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ + 279, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + 279, /* (228) case_exprlist ::= WHEN expr THEN expr */ + 280, /* (229) case_else ::= ELSE expr */ + 280, /* (230) case_else ::= */ 278, /* (231) case_operand ::= */ 261, /* (232) exprlist ::= */ 253, /* (233) nexprlist ::= nexprlist COMMA expr */ @@ -171922,100 +179653,100 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 304, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ 241, /* (309) wqlist ::= wqitem */ 241, /* (310) wqlist ::= wqlist COMMA wqitem */ - 306, /* (311) windowdefn_list ::= windowdefn */ - 306, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - 307, /* (313) windowdefn ::= nm AS LP window RP */ - 308, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - 308, /* (316) window ::= ORDER BY sortlist frame_opt */ - 308, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - 308, /* (318) window ::= frame_opt */ - 308, /* (319) window ::= nm frame_opt */ - 309, /* (320) frame_opt ::= */ - 309, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - 309, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - 313, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - 315, /* (324) frame_bound_s ::= frame_bound */ - 315, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - 316, /* (326) frame_bound_e ::= frame_bound */ - 316, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - 314, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - 314, /* (329) frame_bound ::= CURRENT ROW */ - 317, /* (330) frame_exclude_opt ::= */ - 317, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - 318, /* (332) frame_exclude ::= NO OTHERS */ - 318, /* (333) frame_exclude ::= CURRENT ROW */ - 318, /* (334) frame_exclude ::= GROUP|TIES */ - 251, /* (335) window_clause ::= WINDOW windowdefn_list */ - 273, /* (336) filter_over ::= filter_clause over_clause */ - 273, /* (337) filter_over ::= over_clause */ - 273, /* (338) filter_over ::= filter_clause */ - 312, /* (339) over_clause ::= OVER LP window RP */ - 312, /* (340) over_clause ::= OVER nm */ - 311, /* (341) filter_clause ::= FILTER LP WHERE expr RP */ - 185, /* (342) input ::= cmdlist */ - 186, /* (343) cmdlist ::= cmdlist ecmd */ - 186, /* (344) cmdlist ::= ecmd */ - 187, /* (345) ecmd ::= SEMI */ - 187, /* (346) ecmd ::= cmdx SEMI */ - 187, /* (347) ecmd ::= explain cmdx SEMI */ - 192, /* (348) trans_opt ::= */ - 192, /* (349) trans_opt ::= TRANSACTION */ - 192, /* (350) trans_opt ::= TRANSACTION nm */ - 194, /* (351) savepoint_opt ::= SAVEPOINT */ - 194, /* (352) savepoint_opt ::= */ - 190, /* (353) cmd ::= create_table create_table_args */ - 203, /* (354) table_option_set ::= table_option */ - 201, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - 201, /* (356) columnlist ::= columnname carglist */ - 193, /* (357) nm ::= ID|INDEXED */ - 193, /* (358) nm ::= STRING */ - 193, /* (359) nm ::= JOIN_KW */ - 208, /* (360) typetoken ::= typename */ - 209, /* (361) typename ::= ID|STRING */ - 210, /* (362) signed ::= plus_num */ - 210, /* (363) signed ::= minus_num */ - 207, /* (364) carglist ::= carglist ccons */ - 207, /* (365) carglist ::= */ - 215, /* (366) ccons ::= NULL onconf */ - 215, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - 215, /* (368) ccons ::= AS generated */ - 202, /* (369) 
conslist_opt ::= COMMA conslist */ - 228, /* (370) conslist ::= conslist tconscomma tcons */ - 228, /* (371) conslist ::= tcons */ - 229, /* (372) tconscomma ::= */ - 233, /* (373) defer_subclause_opt ::= defer_subclause */ - 235, /* (374) resolvetype ::= raisetype */ - 239, /* (375) selectnowith ::= oneselect */ - 240, /* (376) oneselect ::= values */ - 254, /* (377) sclp ::= selcollist COMMA */ - 255, /* (378) as ::= ID|STRING */ - 264, /* (379) indexed_opt ::= indexed_by */ - 272, /* (380) returning ::= */ - 217, /* (381) expr ::= term */ - 274, /* (382) likeop ::= LIKE_KW|MATCH */ - 261, /* (383) exprlist ::= nexprlist */ - 284, /* (384) nmnum ::= plus_num */ - 284, /* (385) nmnum ::= nm */ - 284, /* (386) nmnum ::= ON */ - 284, /* (387) nmnum ::= DELETE */ - 284, /* (388) nmnum ::= DEFAULT */ - 211, /* (389) plus_num ::= INTEGER|FLOAT */ - 289, /* (390) foreach_clause ::= */ - 289, /* (391) foreach_clause ::= FOR EACH ROW */ - 292, /* (392) trnm ::= nm */ - 293, /* (393) tridxby ::= */ - 294, /* (394) database_kw_opt ::= DATABASE */ - 294, /* (395) database_kw_opt ::= */ - 297, /* (396) kwcolumn_opt ::= */ - 297, /* (397) kwcolumn_opt ::= COLUMNKW */ - 299, /* (398) vtabarglist ::= vtabarg */ - 299, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - 300, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 303, /* (401) anylist ::= */ - 303, /* (402) anylist ::= anylist LP anylist RP */ - 303, /* (403) anylist ::= anylist ANY */ - 266, /* (404) with ::= */ + 306, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + 307, /* (312) windowdefn ::= nm AS LP window RP */ + 308, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + 308, /* (315) window ::= ORDER BY sortlist frame_opt */ + 308, /* (316) window ::= nm ORDER BY sortlist frame_opt */ + 308, /* (317) window ::= nm frame_opt */ + 309, /* (318) frame_opt ::= */ + 309, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + 309, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + 313, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + 315, /* (322) frame_bound_s ::= frame_bound */ + 315, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + 316, /* (324) frame_bound_e ::= frame_bound */ + 316, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + 314, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + 314, /* (327) frame_bound ::= CURRENT ROW */ + 317, /* (328) frame_exclude_opt ::= */ + 317, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + 318, /* (330) frame_exclude ::= NO OTHERS */ + 318, /* (331) frame_exclude ::= CURRENT ROW */ + 318, /* (332) frame_exclude ::= GROUP|TIES */ + 251, /* (333) window_clause ::= WINDOW windowdefn_list */ + 273, /* (334) filter_over ::= filter_clause over_clause */ + 273, /* (335) filter_over ::= over_clause */ + 273, /* (336) filter_over ::= filter_clause */ + 312, /* (337) over_clause ::= OVER LP window RP */ + 312, /* (338) over_clause ::= OVER nm */ + 311, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + 185, /* (340) input ::= cmdlist */ + 186, /* (341) cmdlist ::= cmdlist ecmd */ + 186, /* (342) cmdlist ::= ecmd */ + 187, /* (343) ecmd ::= SEMI */ + 187, /* (344) ecmd ::= cmdx SEMI */ + 187, /* (345) ecmd ::= explain cmdx SEMI */ + 192, /* (346) trans_opt ::= */ + 192, /* (347) trans_opt ::= TRANSACTION */ + 192, /* (348) trans_opt ::= TRANSACTION nm */ + 194, /* (349) savepoint_opt ::= SAVEPOINT */ + 194, /* 
(350) savepoint_opt ::= */ + 190, /* (351) cmd ::= create_table create_table_args */ + 203, /* (352) table_option_set ::= table_option */ + 201, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + 201, /* (354) columnlist ::= columnname carglist */ + 193, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + 193, /* (356) nm ::= STRING */ + 208, /* (357) typetoken ::= typename */ + 209, /* (358) typename ::= ID|STRING */ + 210, /* (359) signed ::= plus_num */ + 210, /* (360) signed ::= minus_num */ + 207, /* (361) carglist ::= carglist ccons */ + 207, /* (362) carglist ::= */ + 215, /* (363) ccons ::= NULL onconf */ + 215, /* (364) ccons ::= GENERATED ALWAYS AS generated */ + 215, /* (365) ccons ::= AS generated */ + 202, /* (366) conslist_opt ::= COMMA conslist */ + 228, /* (367) conslist ::= conslist tconscomma tcons */ + 228, /* (368) conslist ::= tcons */ + 229, /* (369) tconscomma ::= */ + 233, /* (370) defer_subclause_opt ::= defer_subclause */ + 235, /* (371) resolvetype ::= raisetype */ + 239, /* (372) selectnowith ::= oneselect */ + 240, /* (373) oneselect ::= values */ + 254, /* (374) sclp ::= selcollist COMMA */ + 255, /* (375) as ::= ID|STRING */ + 264, /* (376) indexed_opt ::= indexed_by */ + 272, /* (377) returning ::= */ + 217, /* (378) expr ::= term */ + 274, /* (379) likeop ::= LIKE_KW|MATCH */ + 278, /* (380) case_operand ::= expr */ + 261, /* (381) exprlist ::= nexprlist */ + 284, /* (382) nmnum ::= plus_num */ + 284, /* (383) nmnum ::= nm */ + 284, /* (384) nmnum ::= ON */ + 284, /* (385) nmnum ::= DELETE */ + 284, /* (386) nmnum ::= DEFAULT */ + 211, /* (387) plus_num ::= INTEGER|FLOAT */ + 289, /* (388) foreach_clause ::= */ + 289, /* (389) foreach_clause ::= FOR EACH ROW */ + 292, /* (390) trnm ::= nm */ + 293, /* (391) tridxby ::= */ + 294, /* (392) database_kw_opt ::= DATABASE */ + 294, /* (393) database_kw_opt ::= */ + 297, /* (394) kwcolumn_opt ::= */ + 297, /* (395) kwcolumn_opt ::= COLUMNKW */ + 299, /* (396) vtabarglist ::= vtabarg */ + 299, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + 300, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 303, /* (399) anylist ::= */ + 303, /* (400) anylist ::= anylist LP anylist RP */ + 303, /* (401) anylist ::= anylist ANY */ + 266, /* (402) with ::= */ + 306, /* (403) windowdefn_list ::= windowdefn */ + 308, /* (404) window ::= frame_opt */ }; /* For rule J, yyRuleInfoNRhs[J] contains the negative of the number @@ -172199,59 +179930,59 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (175) idlist ::= idlist COMMA nm */ -1, /* (176) idlist ::= nm */ -3, /* (177) expr ::= LP expr RP */ - -1, /* (178) expr ::= ID|INDEXED */ - -1, /* (179) expr ::= JOIN_KW */ - -3, /* (180) expr ::= nm DOT nm */ - -5, /* (181) expr ::= nm DOT nm DOT nm */ - -1, /* (182) term ::= NULL|FLOAT|BLOB */ - -1, /* (183) term ::= STRING */ - -1, /* (184) term ::= INTEGER */ - -1, /* (185) expr ::= VARIABLE */ - -3, /* (186) expr ::= expr COLLATE ID|STRING */ - -6, /* (187) expr ::= CAST LP expr AS typetoken RP */ - -5, /* (188) expr ::= ID|INDEXED LP distinct exprlist RP */ - -4, /* (189) expr ::= ID|INDEXED LP STAR RP */ - -6, /* (190) expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ - -5, /* (191) expr ::= ID|INDEXED LP STAR RP filter_over */ - -1, /* (192) term ::= CTIME_KW */ - -5, /* (193) expr ::= LP nexprlist COMMA expr RP */ - -3, /* (194) expr ::= expr AND expr */ - -3, /* (195) expr ::= expr OR expr */ - -3, /* (196) expr ::= expr LT|GT|GE|LE expr */ - -3, /* (197) expr ::= expr EQ|NE expr */ - -3, /* (198) expr ::= 
expr BITAND|BITOR|LSHIFT|RSHIFT expr */ - -3, /* (199) expr ::= expr PLUS|MINUS expr */ - -3, /* (200) expr ::= expr STAR|SLASH|REM expr */ - -3, /* (201) expr ::= expr CONCAT expr */ - -2, /* (202) likeop ::= NOT LIKE_KW|MATCH */ - -3, /* (203) expr ::= expr likeop expr */ - -5, /* (204) expr ::= expr likeop expr ESCAPE expr */ - -2, /* (205) expr ::= expr ISNULL|NOTNULL */ - -3, /* (206) expr ::= expr NOT NULL */ - -3, /* (207) expr ::= expr IS expr */ - -4, /* (208) expr ::= expr IS NOT expr */ - -6, /* (209) expr ::= expr IS NOT DISTINCT FROM expr */ - -5, /* (210) expr ::= expr IS DISTINCT FROM expr */ - -2, /* (211) expr ::= NOT expr */ - -2, /* (212) expr ::= BITNOT expr */ - -2, /* (213) expr ::= PLUS|MINUS expr */ - -3, /* (214) expr ::= expr PTR expr */ - -1, /* (215) between_op ::= BETWEEN */ - -2, /* (216) between_op ::= NOT BETWEEN */ - -5, /* (217) expr ::= expr between_op expr AND expr */ - -1, /* (218) in_op ::= IN */ - -2, /* (219) in_op ::= NOT IN */ - -5, /* (220) expr ::= expr in_op LP exprlist RP */ - -3, /* (221) expr ::= LP select RP */ - -5, /* (222) expr ::= expr in_op LP select RP */ - -5, /* (223) expr ::= expr in_op nm dbnm paren_exprlist */ - -4, /* (224) expr ::= EXISTS LP select RP */ - -5, /* (225) expr ::= CASE case_operand case_exprlist case_else END */ - -5, /* (226) case_exprlist ::= case_exprlist WHEN expr THEN expr */ - -4, /* (227) case_exprlist ::= WHEN expr THEN expr */ - -2, /* (228) case_else ::= ELSE expr */ - 0, /* (229) case_else ::= */ - -1, /* (230) case_operand ::= expr */ + -1, /* (178) expr ::= ID|INDEXED|JOIN_KW */ + -3, /* (179) expr ::= nm DOT nm */ + -5, /* (180) expr ::= nm DOT nm DOT nm */ + -1, /* (181) term ::= NULL|FLOAT|BLOB */ + -1, /* (182) term ::= STRING */ + -1, /* (183) term ::= INTEGER */ + -1, /* (184) expr ::= VARIABLE */ + -3, /* (185) expr ::= expr COLLATE ID|STRING */ + -6, /* (186) expr ::= CAST LP expr AS typetoken RP */ + -5, /* (187) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ + -8, /* (188) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ + -4, /* (189) expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ + -6, /* (190) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ + -9, /* (191) expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP filter_over */ + -5, /* (192) expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ + -1, /* (193) term ::= CTIME_KW */ + -5, /* (194) expr ::= LP nexprlist COMMA expr RP */ + -3, /* (195) expr ::= expr AND expr */ + -3, /* (196) expr ::= expr OR expr */ + -3, /* (197) expr ::= expr LT|GT|GE|LE expr */ + -3, /* (198) expr ::= expr EQ|NE expr */ + -3, /* (199) expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ + -3, /* (200) expr ::= expr PLUS|MINUS expr */ + -3, /* (201) expr ::= expr STAR|SLASH|REM expr */ + -3, /* (202) expr ::= expr CONCAT expr */ + -2, /* (203) likeop ::= NOT LIKE_KW|MATCH */ + -3, /* (204) expr ::= expr likeop expr */ + -5, /* (205) expr ::= expr likeop expr ESCAPE expr */ + -2, /* (206) expr ::= expr ISNULL|NOTNULL */ + -3, /* (207) expr ::= expr NOT NULL */ + -3, /* (208) expr ::= expr IS expr */ + -4, /* (209) expr ::= expr IS NOT expr */ + -6, /* (210) expr ::= expr IS NOT DISTINCT FROM expr */ + -5, /* (211) expr ::= expr IS DISTINCT FROM expr */ + -2, /* (212) expr ::= NOT expr */ + -2, /* (213) expr ::= BITNOT expr */ + -2, /* (214) expr ::= PLUS|MINUS expr */ + -3, /* (215) expr ::= expr PTR expr */ + -1, /* (216) between_op ::= BETWEEN */ + -2, /* (217) between_op ::= NOT BETWEEN */ + -5, /* 
(218) expr ::= expr between_op expr AND expr */ + -1, /* (219) in_op ::= IN */ + -2, /* (220) in_op ::= NOT IN */ + -5, /* (221) expr ::= expr in_op LP exprlist RP */ + -3, /* (222) expr ::= LP select RP */ + -5, /* (223) expr ::= expr in_op LP select RP */ + -5, /* (224) expr ::= expr in_op nm dbnm paren_exprlist */ + -4, /* (225) expr ::= EXISTS LP select RP */ + -5, /* (226) expr ::= CASE case_operand case_exprlist case_else END */ + -5, /* (227) case_exprlist ::= case_exprlist WHEN expr THEN expr */ + -4, /* (228) case_exprlist ::= WHEN expr THEN expr */ + -2, /* (229) case_else ::= ELSE expr */ + 0, /* (230) case_else ::= */ 0, /* (231) case_operand ::= */ 0, /* (232) exprlist ::= */ -3, /* (233) nexprlist ::= nexprlist COMMA expr */ @@ -172332,100 +180063,100 @@ static const signed char yyRuleInfoNRhs[] = { -6, /* (308) wqitem ::= nm eidlist_opt wqas LP select RP */ -1, /* (309) wqlist ::= wqitem */ -3, /* (310) wqlist ::= wqlist COMMA wqitem */ - -1, /* (311) windowdefn_list ::= windowdefn */ - -3, /* (312) windowdefn_list ::= windowdefn_list COMMA windowdefn */ - -5, /* (313) windowdefn ::= nm AS LP window RP */ - -5, /* (314) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ - -6, /* (315) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ - -4, /* (316) window ::= ORDER BY sortlist frame_opt */ - -5, /* (317) window ::= nm ORDER BY sortlist frame_opt */ - -1, /* (318) window ::= frame_opt */ - -2, /* (319) window ::= nm frame_opt */ - 0, /* (320) frame_opt ::= */ - -3, /* (321) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ - -6, /* (322) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ - -1, /* (323) range_or_rows ::= RANGE|ROWS|GROUPS */ - -1, /* (324) frame_bound_s ::= frame_bound */ - -2, /* (325) frame_bound_s ::= UNBOUNDED PRECEDING */ - -1, /* (326) frame_bound_e ::= frame_bound */ - -2, /* (327) frame_bound_e ::= UNBOUNDED FOLLOWING */ - -2, /* (328) frame_bound ::= expr PRECEDING|FOLLOWING */ - -2, /* (329) frame_bound ::= CURRENT ROW */ - 0, /* (330) frame_exclude_opt ::= */ - -2, /* (331) frame_exclude_opt ::= EXCLUDE frame_exclude */ - -2, /* (332) frame_exclude ::= NO OTHERS */ - -2, /* (333) frame_exclude ::= CURRENT ROW */ - -1, /* (334) frame_exclude ::= GROUP|TIES */ - -2, /* (335) window_clause ::= WINDOW windowdefn_list */ - -2, /* (336) filter_over ::= filter_clause over_clause */ - -1, /* (337) filter_over ::= over_clause */ - -1, /* (338) filter_over ::= filter_clause */ - -4, /* (339) over_clause ::= OVER LP window RP */ - -2, /* (340) over_clause ::= OVER nm */ - -5, /* (341) filter_clause ::= FILTER LP WHERE expr RP */ - -1, /* (342) input ::= cmdlist */ - -2, /* (343) cmdlist ::= cmdlist ecmd */ - -1, /* (344) cmdlist ::= ecmd */ - -1, /* (345) ecmd ::= SEMI */ - -2, /* (346) ecmd ::= cmdx SEMI */ - -3, /* (347) ecmd ::= explain cmdx SEMI */ - 0, /* (348) trans_opt ::= */ - -1, /* (349) trans_opt ::= TRANSACTION */ - -2, /* (350) trans_opt ::= TRANSACTION nm */ - -1, /* (351) savepoint_opt ::= SAVEPOINT */ - 0, /* (352) savepoint_opt ::= */ - -2, /* (353) cmd ::= create_table create_table_args */ - -1, /* (354) table_option_set ::= table_option */ - -4, /* (355) columnlist ::= columnlist COMMA columnname carglist */ - -2, /* (356) columnlist ::= columnname carglist */ - -1, /* (357) nm ::= ID|INDEXED */ - -1, /* (358) nm ::= STRING */ - -1, /* (359) nm ::= JOIN_KW */ - -1, /* (360) typetoken ::= typename */ - -1, /* (361) typename ::= ID|STRING */ - -1, /* (362) signed ::= 
plus_num */ - -1, /* (363) signed ::= minus_num */ - -2, /* (364) carglist ::= carglist ccons */ - 0, /* (365) carglist ::= */ - -2, /* (366) ccons ::= NULL onconf */ - -4, /* (367) ccons ::= GENERATED ALWAYS AS generated */ - -2, /* (368) ccons ::= AS generated */ - -2, /* (369) conslist_opt ::= COMMA conslist */ - -3, /* (370) conslist ::= conslist tconscomma tcons */ - -1, /* (371) conslist ::= tcons */ - 0, /* (372) tconscomma ::= */ - -1, /* (373) defer_subclause_opt ::= defer_subclause */ - -1, /* (374) resolvetype ::= raisetype */ - -1, /* (375) selectnowith ::= oneselect */ - -1, /* (376) oneselect ::= values */ - -2, /* (377) sclp ::= selcollist COMMA */ - -1, /* (378) as ::= ID|STRING */ - -1, /* (379) indexed_opt ::= indexed_by */ - 0, /* (380) returning ::= */ - -1, /* (381) expr ::= term */ - -1, /* (382) likeop ::= LIKE_KW|MATCH */ - -1, /* (383) exprlist ::= nexprlist */ - -1, /* (384) nmnum ::= plus_num */ - -1, /* (385) nmnum ::= nm */ - -1, /* (386) nmnum ::= ON */ - -1, /* (387) nmnum ::= DELETE */ - -1, /* (388) nmnum ::= DEFAULT */ - -1, /* (389) plus_num ::= INTEGER|FLOAT */ - 0, /* (390) foreach_clause ::= */ - -3, /* (391) foreach_clause ::= FOR EACH ROW */ - -1, /* (392) trnm ::= nm */ - 0, /* (393) tridxby ::= */ - -1, /* (394) database_kw_opt ::= DATABASE */ - 0, /* (395) database_kw_opt ::= */ - 0, /* (396) kwcolumn_opt ::= */ - -1, /* (397) kwcolumn_opt ::= COLUMNKW */ - -1, /* (398) vtabarglist ::= vtabarg */ - -3, /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ - -2, /* (400) vtabarg ::= vtabarg vtabargtoken */ - 0, /* (401) anylist ::= */ - -4, /* (402) anylist ::= anylist LP anylist RP */ - -2, /* (403) anylist ::= anylist ANY */ - 0, /* (404) with ::= */ + -3, /* (311) windowdefn_list ::= windowdefn_list COMMA windowdefn */ + -5, /* (312) windowdefn ::= nm AS LP window RP */ + -5, /* (313) window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + -6, /* (314) window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + -4, /* (315) window ::= ORDER BY sortlist frame_opt */ + -5, /* (316) window ::= nm ORDER BY sortlist frame_opt */ + -2, /* (317) window ::= nm frame_opt */ + 0, /* (318) frame_opt ::= */ + -3, /* (319) frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + -6, /* (320) frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + -1, /* (321) range_or_rows ::= RANGE|ROWS|GROUPS */ + -1, /* (322) frame_bound_s ::= frame_bound */ + -2, /* (323) frame_bound_s ::= UNBOUNDED PRECEDING */ + -1, /* (324) frame_bound_e ::= frame_bound */ + -2, /* (325) frame_bound_e ::= UNBOUNDED FOLLOWING */ + -2, /* (326) frame_bound ::= expr PRECEDING|FOLLOWING */ + -2, /* (327) frame_bound ::= CURRENT ROW */ + 0, /* (328) frame_exclude_opt ::= */ + -2, /* (329) frame_exclude_opt ::= EXCLUDE frame_exclude */ + -2, /* (330) frame_exclude ::= NO OTHERS */ + -2, /* (331) frame_exclude ::= CURRENT ROW */ + -1, /* (332) frame_exclude ::= GROUP|TIES */ + -2, /* (333) window_clause ::= WINDOW windowdefn_list */ + -2, /* (334) filter_over ::= filter_clause over_clause */ + -1, /* (335) filter_over ::= over_clause */ + -1, /* (336) filter_over ::= filter_clause */ + -4, /* (337) over_clause ::= OVER LP window RP */ + -2, /* (338) over_clause ::= OVER nm */ + -5, /* (339) filter_clause ::= FILTER LP WHERE expr RP */ + -1, /* (340) input ::= cmdlist */ + -2, /* (341) cmdlist ::= cmdlist ecmd */ + -1, /* (342) cmdlist ::= ecmd */ + -1, /* (343) ecmd ::= SEMI */ + -2, /* (344) ecmd ::= cmdx SEMI */ + -3, /* (345) ecmd ::= 
explain cmdx SEMI */ + 0, /* (346) trans_opt ::= */ + -1, /* (347) trans_opt ::= TRANSACTION */ + -2, /* (348) trans_opt ::= TRANSACTION nm */ + -1, /* (349) savepoint_opt ::= SAVEPOINT */ + 0, /* (350) savepoint_opt ::= */ + -2, /* (351) cmd ::= create_table create_table_args */ + -1, /* (352) table_option_set ::= table_option */ + -4, /* (353) columnlist ::= columnlist COMMA columnname carglist */ + -2, /* (354) columnlist ::= columnname carglist */ + -1, /* (355) nm ::= ID|INDEXED|JOIN_KW */ + -1, /* (356) nm ::= STRING */ + -1, /* (357) typetoken ::= typename */ + -1, /* (358) typename ::= ID|STRING */ + -1, /* (359) signed ::= plus_num */ + -1, /* (360) signed ::= minus_num */ + -2, /* (361) carglist ::= carglist ccons */ + 0, /* (362) carglist ::= */ + -2, /* (363) ccons ::= NULL onconf */ + -4, /* (364) ccons ::= GENERATED ALWAYS AS generated */ + -2, /* (365) ccons ::= AS generated */ + -2, /* (366) conslist_opt ::= COMMA conslist */ + -3, /* (367) conslist ::= conslist tconscomma tcons */ + -1, /* (368) conslist ::= tcons */ + 0, /* (369) tconscomma ::= */ + -1, /* (370) defer_subclause_opt ::= defer_subclause */ + -1, /* (371) resolvetype ::= raisetype */ + -1, /* (372) selectnowith ::= oneselect */ + -1, /* (373) oneselect ::= values */ + -2, /* (374) sclp ::= selcollist COMMA */ + -1, /* (375) as ::= ID|STRING */ + -1, /* (376) indexed_opt ::= indexed_by */ + 0, /* (377) returning ::= */ + -1, /* (378) expr ::= term */ + -1, /* (379) likeop ::= LIKE_KW|MATCH */ + -1, /* (380) case_operand ::= expr */ + -1, /* (381) exprlist ::= nexprlist */ + -1, /* (382) nmnum ::= plus_num */ + -1, /* (383) nmnum ::= nm */ + -1, /* (384) nmnum ::= ON */ + -1, /* (385) nmnum ::= DELETE */ + -1, /* (386) nmnum ::= DEFAULT */ + -1, /* (387) plus_num ::= INTEGER|FLOAT */ + 0, /* (388) foreach_clause ::= */ + -3, /* (389) foreach_clause ::= FOR EACH ROW */ + -1, /* (390) trnm ::= nm */ + 0, /* (391) tridxby ::= */ + -1, /* (392) database_kw_opt ::= DATABASE */ + 0, /* (393) database_kw_opt ::= */ + 0, /* (394) kwcolumn_opt ::= */ + -1, /* (395) kwcolumn_opt ::= COLUMNKW */ + -1, /* (396) vtabarglist ::= vtabarg */ + -3, /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ + -2, /* (398) vtabarg ::= vtabarg vtabargtoken */ + 0, /* (399) anylist ::= */ + -4, /* (400) anylist ::= anylist LP anylist RP */ + -2, /* (401) anylist ::= anylist ANY */ + 0, /* (402) with ::= */ + -1, /* (403) windowdefn_list ::= windowdefn */ + -1, /* (404) window ::= frame_opt */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -172468,10 +180199,10 @@ static YYACTIONTYPE yy_reduce( /********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* explain ::= EXPLAIN */ -{ pParse->explain = 1; } +{ if( pParse->pReprepare==0 ) pParse->explain = 1; } break; case 1: /* explain ::= EXPLAIN QUERY PLAN */ -{ pParse->explain = 2; } +{ if( pParse->pReprepare==0 ) pParse->explain = 2; } break; case 2: /* cmdx ::= cmd */ { sqlite3FinishCoding(pParse); } @@ -172485,7 +180216,7 @@ static YYACTIONTYPE yy_reduce( case 5: /* transtype ::= DEFERRED */ case 6: /* transtype ::= IMMEDIATE */ yytestcase(yyruleno==6); case 7: /* transtype ::= EXCLUSIVE */ yytestcase(yyruleno==7); - case 323: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==323); + case 321: /* range_or_rows ::= RANGE|ROWS|GROUPS */ yytestcase(yyruleno==321); {yymsp[0].minor.yy394 = yymsp[0].major; /*A-overwrites-X*/} break; case 8: /* cmd ::= COMMIT|END trans_opt */ @@ -172706,8 +180437,8 @@ 
static YYACTIONTYPE yy_reduce( break; case 63: /* init_deferred_pred_opt ::= INITIALLY DEFERRED */ case 80: /* ifexists ::= IF EXISTS */ yytestcase(yyruleno==80); - case 216: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==216); - case 219: /* in_op ::= NOT IN */ yytestcase(yyruleno==219); + case 217: /* between_op ::= NOT BETWEEN */ yytestcase(yyruleno==217); + case 220: /* in_op ::= NOT IN */ yytestcase(yyruleno==220); case 245: /* collate ::= COLLATE ID|STRING */ yytestcase(yyruleno==245); {yymsp[-1].minor.yy394 = 1;} break; @@ -172781,7 +180512,6 @@ static YYACTIONTYPE yy_reduce( if( p ){ parserDoubleLinkSelect(pParse, p); } - yymsp[0].minor.yy47 = p; /*A-overwrites-X*/ } break; case 88: /* selectnowith ::= selectnowith multiselect_op oneselect */ @@ -172873,14 +180603,17 @@ static YYACTIONTYPE yy_reduce( case 101: /* selcollist ::= sclp scanpt STAR */ { Expr *p = sqlite3Expr(pParse->db, TK_ASTERISK, 0); + sqlite3ExprSetErrorOffset(p, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse, yymsp[-2].minor.yy322, p); } break; case 102: /* selcollist ::= sclp scanpt nm DOT STAR */ { - Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); - Expr *pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); - Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); + Expr *pRight, *pLeft, *pDot; + pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0); + sqlite3ExprSetErrorOffset(pRight, (int)(yymsp[0].minor.yy0.z - pParse->zTail)); + pLeft = tokenExpr(pParse, TK_ID, yymsp[-2].minor.yy0); + pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight); yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, pDot); } break; @@ -172931,7 +180664,7 @@ static YYACTIONTYPE yy_reduce( { if( yymsp[-5].minor.yy131==0 && yymsp[-1].minor.yy0.n==0 && yymsp[0].minor.yy561.pOn==0 && yymsp[0].minor.yy561.pUsing==0 ){ yymsp[-5].minor.yy131 = yymsp[-3].minor.yy131; - }else if( yymsp[-3].minor.yy131->nSrc==1 ){ + }else if( ALWAYS(yymsp[-3].minor.yy131!=0) && yymsp[-3].minor.yy131->nSrc==1 ){ yymsp[-5].minor.yy131 = sqlite3SrcListAppendFromTerm(pParse,yymsp[-5].minor.yy131,0,0,&yymsp[-1].minor.yy0,0,&yymsp[0].minor.yy561); if( yymsp[-5].minor.yy131 ){ SrcItem *pNew = &yymsp[-5].minor.yy131->a[yymsp[-5].minor.yy131->nSrc-1]; @@ -173059,7 +180792,7 @@ static YYACTIONTYPE yy_reduce( case 146: /* limit_opt ::= */ yytestcase(yyruleno==146); case 151: /* where_opt ::= */ yytestcase(yyruleno==151); case 153: /* where_opt_ret ::= */ yytestcase(yyruleno==153); - case 229: /* case_else ::= */ yytestcase(yyruleno==229); + case 230: /* case_else ::= */ yytestcase(yyruleno==230); case 231: /* case_operand ::= */ yytestcase(yyruleno==231); case 250: /* vinto ::= */ yytestcase(yyruleno==250); {yymsp[1].minor.yy528 = 0;} @@ -173067,7 +180800,7 @@ static YYACTIONTYPE yy_reduce( case 145: /* having_opt ::= HAVING expr */ case 152: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==152); case 154: /* where_opt_ret ::= WHERE expr */ yytestcase(yyruleno==154); - case 228: /* case_else ::= ELSE expr */ yytestcase(yyruleno==228); + case 229: /* case_else ::= ELSE expr */ yytestcase(yyruleno==229); case 249: /* vinto ::= INTO expr */ yytestcase(yyruleno==249); {yymsp[-1].minor.yy528 = yymsp[0].minor.yy528;} break; @@ -173180,11 +180913,10 @@ static YYACTIONTYPE yy_reduce( case 177: /* expr ::= LP expr RP */ {yymsp[-2].minor.yy528 = yymsp[-1].minor.yy528;} break; - case 178: /* expr ::= ID|INDEXED */ - case 179: /* expr ::= JOIN_KW */ yytestcase(yyruleno==179); + case 178: /* expr 
::= ID|INDEXED|JOIN_KW */ {yymsp[0].minor.yy528=tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 180: /* expr ::= nm DOT nm */ + case 179: /* expr ::= nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[0].minor.yy0); @@ -173192,7 +180924,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 181: /* expr ::= nm DOT nm DOT nm */ + case 180: /* expr ::= nm DOT nm DOT nm */ { Expr *temp1 = tokenExpr(pParse,TK_ID,yymsp[-4].minor.yy0); Expr *temp2 = tokenExpr(pParse,TK_ID,yymsp[-2].minor.yy0); @@ -173205,18 +180937,18 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 182: /* term ::= NULL|FLOAT|BLOB */ - case 183: /* term ::= STRING */ yytestcase(yyruleno==183); + case 181: /* term ::= NULL|FLOAT|BLOB */ + case 182: /* term ::= STRING */ yytestcase(yyruleno==182); {yymsp[0].minor.yy528=tokenExpr(pParse,yymsp[0].major,yymsp[0].minor.yy0); /*A-overwrites-X*/} break; - case 184: /* term ::= INTEGER */ + case 183: /* term ::= INTEGER */ { yylhsminor.yy528 = sqlite3ExprAlloc(pParse->db, TK_INTEGER, &yymsp[0].minor.yy0, 1); if( yylhsminor.yy528 ) yylhsminor.yy528->w.iOfst = (int)(yymsp[0].minor.yy0.z - pParse->zTail); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 185: /* expr ::= VARIABLE */ + case 184: /* expr ::= VARIABLE */ { if( !(yymsp[0].minor.yy0.z[0]=='#' && sqlite3Isdigit(yymsp[0].minor.yy0.z[1])) ){ u32 n = yymsp[0].minor.yy0.n; @@ -173238,50 +180970,65 @@ static YYACTIONTYPE yy_reduce( } } break; - case 186: /* expr ::= expr COLLATE ID|STRING */ + case 185: /* expr ::= expr COLLATE ID|STRING */ { yymsp[-2].minor.yy528 = sqlite3ExprAddCollateToken(pParse, yymsp[-2].minor.yy528, &yymsp[0].minor.yy0, 1); } break; - case 187: /* expr ::= CAST LP expr AS typetoken RP */ + case 186: /* expr ::= CAST LP expr AS typetoken RP */ { yymsp[-5].minor.yy528 = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1); sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy528, yymsp[-3].minor.yy528, 0); } break; - case 188: /* expr ::= ID|INDEXED LP distinct exprlist RP */ + case 187: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy394); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 189: /* expr ::= ID|INDEXED LP STAR RP */ + case 188: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP */ +{ + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-4].minor.yy322, &yymsp[-7].minor.yy0, yymsp[-5].minor.yy394); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-1].minor.yy322); +} + yymsp[-7].minor.yy528 = yylhsminor.yy528; + break; + case 189: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-3].minor.yy0, 0); } yymsp[-3].minor.yy528 = yylhsminor.yy528; break; - case 190: /* expr ::= ID|INDEXED LP distinct exprlist RP filter_over */ + case 190: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-2].minor.yy322, &yymsp[-5].minor.yy0, yymsp[-3].minor.yy394); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-5].minor.yy528 = yylhsminor.yy528; break; - case 191: /* expr ::= ID|INDEXED LP STAR RP filter_over */ + case 191: /* expr ::= ID|INDEXED|JOIN_KW LP distinct exprlist ORDER BY sortlist RP 
filter_over */ +{ + yylhsminor.yy528 = sqlite3ExprFunction(pParse, yymsp[-5].minor.yy322, &yymsp[-8].minor.yy0, yymsp[-6].minor.yy394); + sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); + sqlite3ExprAddFunctionOrderBy(pParse, yylhsminor.yy528, yymsp[-2].minor.yy322); +} + yymsp[-8].minor.yy528 = yylhsminor.yy528; + break; + case 192: /* expr ::= ID|INDEXED|JOIN_KW LP STAR RP filter_over */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[-4].minor.yy0, 0); sqlite3WindowAttach(pParse, yylhsminor.yy528, yymsp[0].minor.yy41); } yymsp[-4].minor.yy528 = yylhsminor.yy528; break; - case 192: /* term ::= CTIME_KW */ + case 193: /* term ::= CTIME_KW */ { yylhsminor.yy528 = sqlite3ExprFunction(pParse, 0, &yymsp[0].minor.yy0, 0); } yymsp[0].minor.yy528 = yylhsminor.yy528; break; - case 193: /* expr ::= LP nexprlist COMMA expr RP */ + case 194: /* expr ::= LP nexprlist COMMA expr RP */ { ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy322, yymsp[-1].minor.yy528); yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_VECTOR, 0, 0); @@ -173295,22 +181042,22 @@ static YYACTIONTYPE yy_reduce( } } break; - case 194: /* expr ::= expr AND expr */ + case 195: /* expr ::= expr AND expr */ {yymsp[-2].minor.yy528=sqlite3ExprAnd(pParse,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 195: /* expr ::= expr OR expr */ - case 196: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==196); - case 197: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==197); - case 198: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==198); - case 199: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==199); - case 200: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==200); - case 201: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==201); + case 196: /* expr ::= expr OR expr */ + case 197: /* expr ::= expr LT|GT|GE|LE expr */ yytestcase(yyruleno==197); + case 198: /* expr ::= expr EQ|NE expr */ yytestcase(yyruleno==198); + case 199: /* expr ::= expr BITAND|BITOR|LSHIFT|RSHIFT expr */ yytestcase(yyruleno==199); + case 200: /* expr ::= expr PLUS|MINUS expr */ yytestcase(yyruleno==200); + case 201: /* expr ::= expr STAR|SLASH|REM expr */ yytestcase(yyruleno==201); + case 202: /* expr ::= expr CONCAT expr */ yytestcase(yyruleno==202); {yymsp[-2].minor.yy528=sqlite3PExpr(pParse,yymsp[-1].major,yymsp[-2].minor.yy528,yymsp[0].minor.yy528);} break; - case 202: /* likeop ::= NOT LIKE_KW|MATCH */ + case 203: /* likeop ::= NOT LIKE_KW|MATCH */ {yymsp[-1].minor.yy0=yymsp[0].minor.yy0; yymsp[-1].minor.yy0.n|=0x80000000; /*yymsp[-1].minor.yy0-overwrite-yymsp[0].minor.yy0*/} break; - case 203: /* expr ::= expr likeop expr */ + case 204: /* expr ::= expr likeop expr */ { ExprList *pList; int bNot = yymsp[-1].minor.yy0.n & 0x80000000; @@ -173322,7 +181069,7 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-2].minor.yy528 ) yymsp[-2].minor.yy528->flags |= EP_InfixFunc; } break; - case 204: /* expr ::= expr likeop expr ESCAPE expr */ + case 205: /* expr ::= expr likeop expr ESCAPE expr */ { ExprList *pList; int bNot = yymsp[-3].minor.yy0.n & 0x80000000; @@ -173335,47 +181082,47 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-4].minor.yy528 ) yymsp[-4].minor.yy528->flags |= EP_InfixFunc; } break; - case 205: /* expr ::= expr ISNULL|NOTNULL */ + case 206: /* expr ::= expr ISNULL|NOTNULL */ {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse,yymsp[0].major,yymsp[-1].minor.yy528,0);} break; - case 206: /* expr ::= expr NOT NULL */ + case 207: /* expr ::= expr NOT NULL 
*/ {yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_NOTNULL,yymsp[-2].minor.yy528,0);} break; - case 207: /* expr ::= expr IS expr */ + case 208: /* expr ::= expr IS expr */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-2].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-2].minor.yy528, TK_ISNULL); } break; - case 208: /* expr ::= expr IS NOT expr */ + case 209: /* expr ::= expr IS NOT expr */ { yymsp[-3].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-3].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-3].minor.yy528, TK_NOTNULL); } break; - case 209: /* expr ::= expr IS NOT DISTINCT FROM expr */ + case 210: /* expr ::= expr IS NOT DISTINCT FROM expr */ { yymsp[-5].minor.yy528 = sqlite3PExpr(pParse,TK_IS,yymsp[-5].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-5].minor.yy528, TK_ISNULL); } break; - case 210: /* expr ::= expr IS DISTINCT FROM expr */ + case 211: /* expr ::= expr IS DISTINCT FROM expr */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse,TK_ISNOT,yymsp[-4].minor.yy528,yymsp[0].minor.yy528); binaryToUnaryIfNull(pParse, yymsp[0].minor.yy528, yymsp[-4].minor.yy528, TK_NOTNULL); } break; - case 211: /* expr ::= NOT expr */ - case 212: /* expr ::= BITNOT expr */ yytestcase(yyruleno==212); + case 212: /* expr ::= NOT expr */ + case 213: /* expr ::= BITNOT expr */ yytestcase(yyruleno==213); {yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major, yymsp[0].minor.yy528, 0);/*A-overwrites-B*/} break; - case 213: /* expr ::= PLUS|MINUS expr */ + case 214: /* expr ::= PLUS|MINUS expr */ { yymsp[-1].minor.yy528 = sqlite3PExpr(pParse, yymsp[-1].major==TK_PLUS ? TK_UPLUS : TK_UMINUS, yymsp[0].minor.yy528, 0); /*A-overwrites-B*/ } break; - case 214: /* expr ::= expr PTR expr */ + case 215: /* expr ::= expr PTR expr */ { ExprList *pList = sqlite3ExprListAppend(pParse, 0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse, pList, yymsp[0].minor.yy528); @@ -173383,11 +181130,11 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy528 = yylhsminor.yy528; break; - case 215: /* between_op ::= BETWEEN */ - case 218: /* in_op ::= IN */ yytestcase(yyruleno==218); + case 216: /* between_op ::= BETWEEN */ + case 219: /* in_op ::= IN */ yytestcase(yyruleno==219); {yymsp[0].minor.yy394 = 0;} break; - case 217: /* expr ::= expr between_op expr AND expr */ + case 218: /* expr ::= expr between_op expr AND expr */ { ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy528); @@ -173400,7 +181147,7 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 220: /* expr ::= expr in_op LP exprlist RP */ + case 221: /* expr ::= expr in_op LP exprlist RP */ { if( yymsp[-1].minor.yy322==0 ){ /* Expressions of the form @@ -173421,6 +181168,11 @@ static YYACTIONTYPE yy_reduce( sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0); yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS); + }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){ + yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); + sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect); + pRHS->x.pSelect = 0; + sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322); }else{ 
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); if( yymsp[-4].minor.yy528==0 ){ @@ -173441,20 +181193,20 @@ static YYACTIONTYPE yy_reduce( } } break; - case 221: /* expr ::= LP select RP */ + case 222: /* expr ::= LP select RP */ { yymsp[-2].minor.yy528 = sqlite3PExpr(pParse, TK_SELECT, 0, 0); sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy528, yymsp[-1].minor.yy47); } break; - case 222: /* expr ::= expr in_op LP select RP */ + case 223: /* expr ::= expr in_op LP select RP */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0); sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, yymsp[-1].minor.yy47); if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 223: /* expr ::= expr in_op nm dbnm paren_exprlist */ + case 224: /* expr ::= expr in_op nm dbnm paren_exprlist */ { SrcList *pSrc = sqlite3SrcListAppend(pParse, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0); Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0); @@ -173464,14 +181216,14 @@ static YYACTIONTYPE yy_reduce( if( yymsp[-3].minor.yy394 ) yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_NOT, yymsp[-4].minor.yy528, 0); } break; - case 224: /* expr ::= EXISTS LP select RP */ + case 225: /* expr ::= EXISTS LP select RP */ { Expr *p; p = yymsp[-3].minor.yy528 = sqlite3PExpr(pParse, TK_EXISTS, 0, 0); sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy47); } break; - case 225: /* expr ::= CASE case_operand case_exprlist case_else END */ + case 226: /* expr ::= CASE case_operand case_exprlist case_else END */ { yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy528, 0); if( yymsp[-4].minor.yy528 ){ @@ -173483,21 +181235,18 @@ static YYACTIONTYPE yy_reduce( } } break; - case 226: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ + case 227: /* case_exprlist ::= case_exprlist WHEN expr THEN expr */ { yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[-2].minor.yy528); yymsp[-4].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-4].minor.yy322, yymsp[0].minor.yy528); } break; - case 227: /* case_exprlist ::= WHEN expr THEN expr */ + case 228: /* case_exprlist ::= WHEN expr THEN expr */ { yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy528); yymsp[-3].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy322, yymsp[0].minor.yy528); } break; - case 230: /* case_operand ::= expr */ -{yymsp[0].minor.yy528 = yymsp[0].minor.yy528; /*A-overwrites-X*/} - break; case 233: /* nexprlist ::= nexprlist COMMA expr */ {yymsp[-2].minor.yy322 = sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy322,yymsp[0].minor.yy528);} break; @@ -173773,11 +181522,7 @@ static YYACTIONTYPE yy_reduce( yymsp[-2].minor.yy521 = sqlite3WithAdd(pParse, yymsp[-2].minor.yy521, yymsp[0].minor.yy385); } break; - case 311: /* windowdefn_list ::= windowdefn */ -{ yylhsminor.yy41 = yymsp[0].minor.yy41; } - yymsp[0].minor.yy41 = yylhsminor.yy41; - break; - case 312: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ + case 311: /* windowdefn_list ::= windowdefn_list COMMA windowdefn */ { assert( yymsp[0].minor.yy41!=0 ); sqlite3WindowChain(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy41); @@ -173786,7 +181531,7 @@ static YYACTIONTYPE yy_reduce( } yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 313: /* windowdefn ::= nm AS LP window RP */ + case 312: /* windowdefn ::= nm AS LP window RP */ { if( ALWAYS(yymsp[-1].minor.yy41) ){ 
yymsp[-1].minor.yy41->zName = sqlite3DbStrNDup(pParse->db, yymsp[-4].minor.yy0.z, yymsp[-4].minor.yy0.n); @@ -173795,90 +181540,83 @@ static YYACTIONTYPE yy_reduce( } yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 314: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ + case 313: /* window ::= PARTITION BY nexprlist orderby_opt frame_opt */ { yymsp[-4].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, 0); } break; - case 315: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ + case 314: /* window ::= nm PARTITION BY nexprlist orderby_opt frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, yymsp[-2].minor.yy322, yymsp[-1].minor.yy322, &yymsp[-5].minor.yy0); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 316: /* window ::= ORDER BY sortlist frame_opt */ + case 315: /* window ::= ORDER BY sortlist frame_opt */ { yymsp[-3].minor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, 0); } break; - case 317: /* window ::= nm ORDER BY sortlist frame_opt */ + case 316: /* window ::= nm ORDER BY sortlist frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, yymsp[-1].minor.yy322, &yymsp[-4].minor.yy0); } yymsp[-4].minor.yy41 = yylhsminor.yy41; break; - case 318: /* window ::= frame_opt */ - case 337: /* filter_over ::= over_clause */ yytestcase(yyruleno==337); -{ - yylhsminor.yy41 = yymsp[0].minor.yy41; -} - yymsp[0].minor.yy41 = yylhsminor.yy41; - break; - case 319: /* window ::= nm frame_opt */ + case 317: /* window ::= nm frame_opt */ { yylhsminor.yy41 = sqlite3WindowAssemble(pParse, yymsp[0].minor.yy41, 0, 0, &yymsp[-1].minor.yy0); } yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 320: /* frame_opt ::= */ + case 318: /* frame_opt ::= */ { yymsp[1].minor.yy41 = sqlite3WindowAlloc(pParse, 0, TK_UNBOUNDED, 0, TK_CURRENT, 0, 0); } break; - case 321: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ + case 319: /* frame_opt ::= range_or_rows frame_bound_s frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-2].minor.yy394, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, TK_CURRENT, 0, yymsp[0].minor.yy516); } yymsp[-2].minor.yy41 = yylhsminor.yy41; break; - case 322: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ + case 320: /* frame_opt ::= range_or_rows BETWEEN frame_bound_s AND frame_bound_e frame_exclude_opt */ { yylhsminor.yy41 = sqlite3WindowAlloc(pParse, yymsp[-5].minor.yy394, yymsp[-3].minor.yy595.eType, yymsp[-3].minor.yy595.pExpr, yymsp[-1].minor.yy595.eType, yymsp[-1].minor.yy595.pExpr, yymsp[0].minor.yy516); } yymsp[-5].minor.yy41 = yylhsminor.yy41; break; - case 324: /* frame_bound_s ::= frame_bound */ - case 326: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==326); + case 322: /* frame_bound_s ::= frame_bound */ + case 324: /* frame_bound_e ::= frame_bound */ yytestcase(yyruleno==324); {yylhsminor.yy595 = yymsp[0].minor.yy595;} yymsp[0].minor.yy595 = yylhsminor.yy595; break; - case 325: /* frame_bound_s ::= UNBOUNDED PRECEDING */ - case 327: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==327); - case 329: /* frame_bound ::= CURRENT ROW */ yytestcase(yyruleno==329); + case 323: /* frame_bound_s ::= UNBOUNDED PRECEDING */ + case 325: /* frame_bound_e ::= UNBOUNDED FOLLOWING */ yytestcase(yyruleno==325); + case 327: /* frame_bound ::= CURRENT ROW */ 
yytestcase(yyruleno==327); {yylhsminor.yy595.eType = yymsp[-1].major; yylhsminor.yy595.pExpr = 0;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 328: /* frame_bound ::= expr PRECEDING|FOLLOWING */ + case 326: /* frame_bound ::= expr PRECEDING|FOLLOWING */ {yylhsminor.yy595.eType = yymsp[0].major; yylhsminor.yy595.pExpr = yymsp[-1].minor.yy528;} yymsp[-1].minor.yy595 = yylhsminor.yy595; break; - case 330: /* frame_exclude_opt ::= */ + case 328: /* frame_exclude_opt ::= */ {yymsp[1].minor.yy516 = 0;} break; - case 331: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ + case 329: /* frame_exclude_opt ::= EXCLUDE frame_exclude */ {yymsp[-1].minor.yy516 = yymsp[0].minor.yy516;} break; - case 332: /* frame_exclude ::= NO OTHERS */ - case 333: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==333); + case 330: /* frame_exclude ::= NO OTHERS */ + case 331: /* frame_exclude ::= CURRENT ROW */ yytestcase(yyruleno==331); {yymsp[-1].minor.yy516 = yymsp[-1].major; /*A-overwrites-X*/} break; - case 334: /* frame_exclude ::= GROUP|TIES */ + case 332: /* frame_exclude ::= GROUP|TIES */ {yymsp[0].minor.yy516 = yymsp[0].major; /*A-overwrites-X*/} break; - case 335: /* window_clause ::= WINDOW windowdefn_list */ + case 333: /* window_clause ::= WINDOW windowdefn_list */ { yymsp[-1].minor.yy41 = yymsp[0].minor.yy41; } break; - case 336: /* filter_over ::= filter_clause over_clause */ + case 334: /* filter_over ::= filter_clause over_clause */ { if( yymsp[0].minor.yy41 ){ yymsp[0].minor.yy41->pFilter = yymsp[-1].minor.yy528; @@ -173889,7 +181627,13 @@ static YYACTIONTYPE yy_reduce( } yymsp[-1].minor.yy41 = yylhsminor.yy41; break; - case 338: /* filter_over ::= filter_clause */ + case 335: /* filter_over ::= over_clause */ +{ + yylhsminor.yy41 = yymsp[0].minor.yy41; +} + yymsp[0].minor.yy41 = yylhsminor.yy41; + break; + case 336: /* filter_over ::= filter_clause */ { yylhsminor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yylhsminor.yy41 ){ @@ -173901,13 +181645,13 @@ static YYACTIONTYPE yy_reduce( } yymsp[0].minor.yy41 = yylhsminor.yy41; break; - case 339: /* over_clause ::= OVER LP window RP */ + case 337: /* over_clause ::= OVER LP window RP */ { yymsp[-3].minor.yy41 = yymsp[-1].minor.yy41; assert( yymsp[-3].minor.yy41!=0 ); } break; - case 340: /* over_clause ::= OVER nm */ + case 338: /* over_clause ::= OVER nm */ { yymsp[-1].minor.yy41 = (Window*)sqlite3DbMallocZero(pParse->db, sizeof(Window)); if( yymsp[-1].minor.yy41 ){ @@ -173915,73 +181659,75 @@ static YYACTIONTYPE yy_reduce( } } break; - case 341: /* filter_clause ::= FILTER LP WHERE expr RP */ + case 339: /* filter_clause ::= FILTER LP WHERE expr RP */ { yymsp[-4].minor.yy528 = yymsp[-1].minor.yy528; } break; default: - /* (342) input ::= cmdlist */ yytestcase(yyruleno==342); - /* (343) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==343); - /* (344) cmdlist ::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=344); - /* (345) ecmd ::= SEMI */ yytestcase(yyruleno==345); - /* (346) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==346); - /* (347) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=347); - /* (348) trans_opt ::= */ yytestcase(yyruleno==348); - /* (349) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==349); - /* (350) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==350); - /* (351) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==351); - /* (352) savepoint_opt ::= */ yytestcase(yyruleno==352); - /* (353) cmd ::= create_table create_table_args */ yytestcase(yyruleno==353); - /* (354) 
table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=354); - /* (355) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==355); - /* (356) columnlist ::= columnname carglist */ yytestcase(yyruleno==356); - /* (357) nm ::= ID|INDEXED */ yytestcase(yyruleno==357); - /* (358) nm ::= STRING */ yytestcase(yyruleno==358); - /* (359) nm ::= JOIN_KW */ yytestcase(yyruleno==359); - /* (360) typetoken ::= typename */ yytestcase(yyruleno==360); - /* (361) typename ::= ID|STRING */ yytestcase(yyruleno==361); - /* (362) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=362); - /* (363) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=363); - /* (364) carglist ::= carglist ccons */ yytestcase(yyruleno==364); - /* (365) carglist ::= */ yytestcase(yyruleno==365); - /* (366) ccons ::= NULL onconf */ yytestcase(yyruleno==366); - /* (367) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==367); - /* (368) ccons ::= AS generated */ yytestcase(yyruleno==368); - /* (369) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==369); - /* (370) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==370); - /* (371) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=371); - /* (372) tconscomma ::= */ yytestcase(yyruleno==372); - /* (373) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=373); - /* (374) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=374); - /* (375) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=375); - /* (376) oneselect ::= values */ yytestcase(yyruleno==376); - /* (377) sclp ::= selcollist COMMA */ yytestcase(yyruleno==377); - /* (378) as ::= ID|STRING */ yytestcase(yyruleno==378); - /* (379) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=379); - /* (380) returning ::= */ yytestcase(yyruleno==380); - /* (381) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=381); - /* (382) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==382); - /* (383) exprlist ::= nexprlist */ yytestcase(yyruleno==383); - /* (384) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=384); - /* (385) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=385); - /* (386) nmnum ::= ON */ yytestcase(yyruleno==386); - /* (387) nmnum ::= DELETE */ yytestcase(yyruleno==387); - /* (388) nmnum ::= DEFAULT */ yytestcase(yyruleno==388); - /* (389) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==389); - /* (390) foreach_clause ::= */ yytestcase(yyruleno==390); - /* (391) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==391); - /* (392) trnm ::= nm */ yytestcase(yyruleno==392); - /* (393) tridxby ::= */ yytestcase(yyruleno==393); - /* (394) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==394); - /* (395) database_kw_opt ::= */ yytestcase(yyruleno==395); - /* (396) kwcolumn_opt ::= */ yytestcase(yyruleno==396); - /* (397) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==397); - /* (398) vtabarglist ::= vtabarg */ yytestcase(yyruleno==398); - /* (399) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==399); - /* (400) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==400); - /* (401) anylist ::= */ yytestcase(yyruleno==401); - /* (402) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==402); - /* (403) anylist ::= anylist ANY */ yytestcase(yyruleno==403); - /* (404) with ::= */ yytestcase(yyruleno==404); + /* (340) input ::= cmdlist */ yytestcase(yyruleno==340); + /* (341) cmdlist ::= cmdlist ecmd */ yytestcase(yyruleno==341); + /* (342) cmdlist 
::= ecmd (OPTIMIZED OUT) */ assert(yyruleno!=342); + /* (343) ecmd ::= SEMI */ yytestcase(yyruleno==343); + /* (344) ecmd ::= cmdx SEMI */ yytestcase(yyruleno==344); + /* (345) ecmd ::= explain cmdx SEMI (NEVER REDUCES) */ assert(yyruleno!=345); + /* (346) trans_opt ::= */ yytestcase(yyruleno==346); + /* (347) trans_opt ::= TRANSACTION */ yytestcase(yyruleno==347); + /* (348) trans_opt ::= TRANSACTION nm */ yytestcase(yyruleno==348); + /* (349) savepoint_opt ::= SAVEPOINT */ yytestcase(yyruleno==349); + /* (350) savepoint_opt ::= */ yytestcase(yyruleno==350); + /* (351) cmd ::= create_table create_table_args */ yytestcase(yyruleno==351); + /* (352) table_option_set ::= table_option (OPTIMIZED OUT) */ assert(yyruleno!=352); + /* (353) columnlist ::= columnlist COMMA columnname carglist */ yytestcase(yyruleno==353); + /* (354) columnlist ::= columnname carglist */ yytestcase(yyruleno==354); + /* (355) nm ::= ID|INDEXED|JOIN_KW */ yytestcase(yyruleno==355); + /* (356) nm ::= STRING */ yytestcase(yyruleno==356); + /* (357) typetoken ::= typename */ yytestcase(yyruleno==357); + /* (358) typename ::= ID|STRING */ yytestcase(yyruleno==358); + /* (359) signed ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=359); + /* (360) signed ::= minus_num (OPTIMIZED OUT) */ assert(yyruleno!=360); + /* (361) carglist ::= carglist ccons */ yytestcase(yyruleno==361); + /* (362) carglist ::= */ yytestcase(yyruleno==362); + /* (363) ccons ::= NULL onconf */ yytestcase(yyruleno==363); + /* (364) ccons ::= GENERATED ALWAYS AS generated */ yytestcase(yyruleno==364); + /* (365) ccons ::= AS generated */ yytestcase(yyruleno==365); + /* (366) conslist_opt ::= COMMA conslist */ yytestcase(yyruleno==366); + /* (367) conslist ::= conslist tconscomma tcons */ yytestcase(yyruleno==367); + /* (368) conslist ::= tcons (OPTIMIZED OUT) */ assert(yyruleno!=368); + /* (369) tconscomma ::= */ yytestcase(yyruleno==369); + /* (370) defer_subclause_opt ::= defer_subclause (OPTIMIZED OUT) */ assert(yyruleno!=370); + /* (371) resolvetype ::= raisetype (OPTIMIZED OUT) */ assert(yyruleno!=371); + /* (372) selectnowith ::= oneselect (OPTIMIZED OUT) */ assert(yyruleno!=372); + /* (373) oneselect ::= values */ yytestcase(yyruleno==373); + /* (374) sclp ::= selcollist COMMA */ yytestcase(yyruleno==374); + /* (375) as ::= ID|STRING */ yytestcase(yyruleno==375); + /* (376) indexed_opt ::= indexed_by (OPTIMIZED OUT) */ assert(yyruleno!=376); + /* (377) returning ::= */ yytestcase(yyruleno==377); + /* (378) expr ::= term (OPTIMIZED OUT) */ assert(yyruleno!=378); + /* (379) likeop ::= LIKE_KW|MATCH */ yytestcase(yyruleno==379); + /* (380) case_operand ::= expr */ yytestcase(yyruleno==380); + /* (381) exprlist ::= nexprlist */ yytestcase(yyruleno==381); + /* (382) nmnum ::= plus_num (OPTIMIZED OUT) */ assert(yyruleno!=382); + /* (383) nmnum ::= nm (OPTIMIZED OUT) */ assert(yyruleno!=383); + /* (384) nmnum ::= ON */ yytestcase(yyruleno==384); + /* (385) nmnum ::= DELETE */ yytestcase(yyruleno==385); + /* (386) nmnum ::= DEFAULT */ yytestcase(yyruleno==386); + /* (387) plus_num ::= INTEGER|FLOAT */ yytestcase(yyruleno==387); + /* (388) foreach_clause ::= */ yytestcase(yyruleno==388); + /* (389) foreach_clause ::= FOR EACH ROW */ yytestcase(yyruleno==389); + /* (390) trnm ::= nm */ yytestcase(yyruleno==390); + /* (391) tridxby ::= */ yytestcase(yyruleno==391); + /* (392) database_kw_opt ::= DATABASE */ yytestcase(yyruleno==392); + /* (393) database_kw_opt ::= */ yytestcase(yyruleno==393); + /* (394) kwcolumn_opt ::= */ yytestcase(yyruleno==394); + /* 
(395) kwcolumn_opt ::= COLUMNKW */ yytestcase(yyruleno==395); + /* (396) vtabarglist ::= vtabarg */ yytestcase(yyruleno==396); + /* (397) vtabarglist ::= vtabarglist COMMA vtabarg */ yytestcase(yyruleno==397); + /* (398) vtabarg ::= vtabarg vtabargtoken */ yytestcase(yyruleno==398); + /* (399) anylist ::= */ yytestcase(yyruleno==399); + /* (400) anylist ::= anylist LP anylist RP */ yytestcase(yyruleno==400); + /* (401) anylist ::= anylist ANY */ yytestcase(yyruleno==401); + /* (402) with ::= */ yytestcase(yyruleno==402); + /* (403) windowdefn_list ::= windowdefn (OPTIMIZED OUT) */ assert(yyruleno!=403); + /* (404) window ::= frame_opt (OPTIMIZED OUT) */ assert(yyruleno!=404); break; /********** End reduce actions ************************************************/ }; @@ -174557,7 +182303,7 @@ static const unsigned char aKWHash[127] = { /* aKWNext[] forms the hash collision chain. If aKWHash[i]==0 ** then the i-th keyword has no more hash collisions. Otherwise, ** the next keyword with the same hash is aKWHash[i]-1. */ -static const unsigned char aKWNext[147] = { +static const unsigned char aKWNext[148] = {0, 0, 0, 0, 0, 4, 0, 43, 0, 0, 106, 114, 0, 0, 0, 2, 0, 0, 143, 0, 0, 0, 13, 0, 0, 0, 0, 141, 0, 0, 119, 52, 0, 0, 137, 12, 0, 0, 62, 0, @@ -174572,7 +182318,7 @@ static const unsigned char aKWNext[147] = { 102, 0, 0, 87, }; /* aKWLen[i] is the length (in bytes) of the i-th keyword */ -static const unsigned char aKWLen[147] = { +static const unsigned char aKWLen[148] = {0, 7, 7, 5, 4, 6, 4, 5, 3, 6, 7, 3, 6, 6, 7, 7, 3, 8, 2, 6, 5, 4, 4, 3, 10, 4, 7, 6, 9, 4, 2, 6, 5, 9, 9, 4, 7, 3, 2, 4, @@ -174588,7 +182334,7 @@ static const unsigned char aKWLen[147] = { }; /* aKWOffset[i] is the index into zKWText[] of the start of ** the text for the i-th keyword. 
*/ -static const unsigned short int aKWOffset[147] = { +static const unsigned short int aKWOffset[148] = {0, 0, 2, 2, 8, 9, 14, 16, 20, 23, 25, 25, 29, 33, 36, 41, 46, 48, 53, 54, 59, 62, 65, 67, 69, 78, 81, 86, 90, 90, 94, 99, 101, 105, 111, 119, 123, 123, 123, 126, @@ -174603,7 +182349,7 @@ static const unsigned short int aKWOffset[147] = { 648, 650, 655, 659, }; /* aKWCode[i] is the parser symbol code for the i-th keyword */ -static const unsigned char aKWCode[147] = { +static const unsigned char aKWCode[148] = {0, TK_REINDEX, TK_INDEXED, TK_INDEX, TK_DESC, TK_ESCAPE, TK_EACH, TK_CHECK, TK_KEY, TK_BEFORE, TK_FOREIGN, TK_FOR, TK_IGNORE, TK_LIKE_KW, TK_EXPLAIN, TK_INSTEAD, @@ -174770,185 +182516,185 @@ static const unsigned char aKWCode[147] = { static int keywordCode(const char *z, int n, int *pType){ int i, j; const char *zKW; - if( n>=2 ){ - i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; - for(i=((int)aKWHash[i])-1; i>=0; i=((int)aKWNext[i])-1){ - if( aKWLen[i]!=n ) continue; - zKW = &zKWText[aKWOffset[i]]; + assert( n>=2 ); + i = ((charMap(z[0])*4) ^ (charMap(z[n-1])*3) ^ n*1) % 127; + for(i=(int)aKWHash[i]; i>0; i=aKWNext[i]){ + if( aKWLen[i]!=n ) continue; + zKW = &zKWText[aKWOffset[i]]; #ifdef SQLITE_ASCII - if( (z[0]&~0x20)!=zKW[0] ) continue; - if( (z[1]&~0x20)!=zKW[1] ) continue; - j = 2; - while( j=2 ) keywordCode((char*)z, n, &id); return id; } #define SQLITE_N_KEYWORD 147 SQLITE_API int sqlite3_keyword_name(int i,const char **pzName,int *pnName){ if( i<0 || i>=SQLITE_N_KEYWORD ) return SQLITE_ERROR; + i++; *pzName = zKWText + aKWOffset[i]; *pnName = aKWLen[i]; return SQLITE_OK; @@ -175247,7 +182993,7 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ testcase( z[0]=='0' ); testcase( z[0]=='1' ); testcase( z[0]=='2' ); testcase( z[0]=='3' ); testcase( z[0]=='4' ); testcase( z[0]=='5' ); testcase( z[0]=='6' ); testcase( z[0]=='7' ); testcase( z[0]=='8' ); - testcase( z[0]=='9' ); + testcase( z[0]=='9' ); testcase( z[0]=='.' ); *tokenType = TK_INTEGER; #ifndef SQLITE_OMIT_HEX_INTEGER if( z[0]=='0' && (z[1]=='x' || z[1]=='X') && sqlite3Isxdigit(z[2]) ){ @@ -175319,7 +183065,8 @@ SQLITE_PRIVATE int sqlite3GetToken(const unsigned char *z, int *tokenType){ return i; } case CC_KYWD0: { - for(i=1; aiClass[z[i]]<=CC_KYWD; i++){} + if( aiClass[z[1]]>CC_KYWD ){ i = 1; break; } + for(i=2; aiClass[z[i]]<=CC_KYWD; i++){} if( IdChar(z[i]) ){ /* This token started out using characters that can appear in keywords, ** but z[i] is a character not allowed within keywords, so this must @@ -175525,7 +183272,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql){ if( pParse->pNewTrigger && !IN_RENAME_OBJECT ){ sqlite3DeleteTrigger(db, pParse->pNewTrigger); } - if( pParse->pVList ) sqlite3DbFreeNN(db, pParse->pVList); + if( pParse->pVList ) sqlite3DbNNFreeNN(db, pParse->pVList); db->pParse = pParentParse; assert( nErr==0 || pParse->rc!=SQLITE_OK ); return nErr; @@ -176098,30 +183845,20 @@ static int sqlite3TestExtInit(sqlite3 *db){ ** Forward declarations of external module initializer functions ** for modules that need them. 
*/ -#ifdef SQLITE_ENABLE_FTS1 -SQLITE_PRIVATE int sqlite3Fts1Init(sqlite3*); -#endif -#ifdef SQLITE_ENABLE_FTS2 -SQLITE_PRIVATE int sqlite3Fts2Init(sqlite3*); -#endif #ifdef SQLITE_ENABLE_FTS5 SQLITE_PRIVATE int sqlite3Fts5Init(sqlite3*); #endif #ifdef SQLITE_ENABLE_STMTVTAB SQLITE_PRIVATE int sqlite3StmtVtabInit(sqlite3*); #endif - +#ifdef SQLITE_EXTRA_AUTOEXT +int SQLITE_EXTRA_AUTOEXT(sqlite3*); +#endif /* ** An array of pointers to extension initializer functions for ** built-in extensions. */ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = { -#ifdef SQLITE_ENABLE_FTS1 - sqlite3Fts1Init, -#endif -#ifdef SQLITE_ENABLE_FTS2 - sqlite3Fts2Init, -#endif #ifdef SQLITE_ENABLE_FTS3 sqlite3Fts3Init, #endif @@ -176150,6 +183887,9 @@ static int (*const sqlite3BuiltinExtensions[])(sqlite3*) = { #ifdef SQLITE_ENABLE_BYTECODE_VTAB sqlite3VdbeBytecodeVtabInit, #endif +#ifdef SQLITE_EXTRA_AUTOEXT + SQLITE_EXTRA_AUTOEXT, +#endif }; #ifndef SQLITE_AMALGAMATION @@ -176223,6 +183963,32 @@ SQLITE_API char *sqlite3_temp_directory = 0; */ SQLITE_API char *sqlite3_data_directory = 0; +/* +** Determine whether or not high-precision (long double) floating point +** math works correctly on CPU currently running. +*/ +static SQLITE_NOINLINE int hasHighPrecisionDouble(int rc){ + if( sizeof(LONGDOUBLE_TYPE)<=8 ){ + /* If the size of "long double" is not more than 8, then + ** high-precision math is not possible. */ + return 0; + }else{ + /* Just because sizeof(long double)>8 does not mean that the underlying + ** hardware actually supports high-precision floating point. For example, + ** clearing the 0x100 bit in the floating-point control word on Intel + ** processors will make long double work like double, even though long + ** double takes up more space. The only way to determine if long double + ** actually works is to run an experiment. */ + LONGDOUBLE_TYPE a, b, c; + rc++; + a = 1.0+rc*0.1; + b = 1.0e+18+rc*25.0; + c = a+b; + return b!=c; + } +} + + /* ** Initialize SQLite. ** @@ -176418,6 +184184,12 @@ SQLITE_API int sqlite3_initialize(void){ } #endif + /* Experimentally determine if high-precision floating point is + ** available. */ +#ifndef SQLITE_OMIT_WSD + sqlite3Config.bUseLongDouble = hasHighPrecisionDouble(rc); +#endif + return rc; } @@ -176487,9 +184259,21 @@ SQLITE_API int sqlite3_config(int op, ...){ va_list ap; int rc = SQLITE_OK; - /* sqlite3_config() shall return SQLITE_MISUSE if it is invoked while - ** the SQLite library is in use. */ - if( sqlite3GlobalConfig.isInit ) return SQLITE_MISUSE_BKPT; + /* sqlite3_config() normally returns SQLITE_MISUSE if it is invoked while + ** the SQLite library is in use. Except, a few selected opcodes + ** are allowed. + */ + if( sqlite3GlobalConfig.isInit ){ + static const u64 mAnytimeConfigOption = 0 + | MASKBIT64( SQLITE_CONFIG_LOG ) + | MASKBIT64( SQLITE_CONFIG_PCACHE_HDRSZ ) + ; + if( op<0 || op>63 || (MASKBIT64(op) & mAnytimeConfigOption)==0 ){ + return SQLITE_MISUSE_BKPT; + } + testcase( op==SQLITE_CONFIG_LOG ); + testcase( op==SQLITE_CONFIG_PCACHE_HDRSZ ); + } va_start(ap, op); switch( op ){ @@ -176558,6 +184342,7 @@ SQLITE_API int sqlite3_config(int op, ...){ break; } case SQLITE_CONFIG_MEMSTATUS: { + assert( !sqlite3GlobalConfig.isInit ); /* Cannot change at runtime */ /* EVIDENCE-OF: R-61275-35157 The SQLITE_CONFIG_MEMSTATUS option takes ** single argument of type int, interpreted as a boolean, which enables ** or disables the collection of memory allocation statistics. 
*/ @@ -176681,8 +184466,10 @@ SQLITE_API int sqlite3_config(int op, ...){ ** sqlite3GlobalConfig.xLog = va_arg(ap, void(*)(void*,int,const char*)); */ typedef void(*LOGFUNC_t)(void*,int,const char*); - sqlite3GlobalConfig.xLog = va_arg(ap, LOGFUNC_t); - sqlite3GlobalConfig.pLogArg = va_arg(ap, void*); + LOGFUNC_t xLog = va_arg(ap, LOGFUNC_t); + void *pLogArg = va_arg(ap, void*); + AtomicStore(&sqlite3GlobalConfig.xLog, xLog); + AtomicStore(&sqlite3GlobalConfig.pLogArg, pLogArg); break; } @@ -176696,7 +184483,8 @@ SQLITE_API int sqlite3_config(int op, ...){ ** argument of type int. If non-zero, then URI handling is globally ** enabled. If the parameter is zero, then URI handling is globally ** disabled. */ - sqlite3GlobalConfig.bOpenUri = va_arg(ap, int); + int bOpenUri = va_arg(ap, int); + AtomicStore(&sqlite3GlobalConfig.bOpenUri, bOpenUri); break; } @@ -176781,6 +184569,18 @@ SQLITE_API int sqlite3_config(int op, ...){ } #endif /* SQLITE_OMIT_DESERIALIZE */ + case SQLITE_CONFIG_ROWID_IN_VIEW: { + int *pVal = va_arg(ap,int*); +#ifdef SQLITE_ALLOW_ROWID_IN_VIEW + if( 0==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = TF_NoVisibleRowid; + if( 1==*pVal ) sqlite3GlobalConfig.mNoVisibleRowid = 0; + *pVal = (sqlite3GlobalConfig.mNoVisibleRowid==0); +#else + *pVal = 0; +#endif + break; + } + default: { rc = SQLITE_ERROR; break; @@ -176881,18 +184681,19 @@ static int setupLookaside(sqlite3 *db, void *pBuf, int sz, int cnt){ db->lookaside.bMalloced = pBuf==0 ?1:0; db->lookaside.nSlot = nBig+nSm; }else{ - db->lookaside.pStart = db; + db->lookaside.pStart = 0; #ifndef SQLITE_OMIT_TWOSIZE_LOOKASIDE db->lookaside.pSmallInit = 0; db->lookaside.pSmallFree = 0; - db->lookaside.pMiddle = db; + db->lookaside.pMiddle = 0; #endif /* SQLITE_OMIT_TWOSIZE_LOOKASIDE */ - db->lookaside.pEnd = db; + db->lookaside.pEnd = 0; db->lookaside.bDisable = 1; db->lookaside.sz = 0; db->lookaside.bMalloced = 0; db->lookaside.nSlot = 0; } + db->lookaside.pTrueEnd = db->lookaside.pEnd; assert( sqlite3LookasideUsed(db,0)==0 ); #endif /* SQLITE_OMIT_LOOKASIDE */ return SQLITE_OK; @@ -176971,6 +184772,11 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3 *db){ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ va_list ap; int rc; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif + sqlite3_mutex_enter(db->mutex); va_start(ap, op); switch( op ){ case SQLITE_DBCONFIG_MAINDBNAME: { @@ -177009,6 +184815,8 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ { SQLITE_DBCONFIG_DQS_DML, SQLITE_DqsDML }, { SQLITE_DBCONFIG_LEGACY_FILE_FORMAT, SQLITE_LegacyFileFmt }, { SQLITE_DBCONFIG_TRUSTED_SCHEMA, SQLITE_TrustedSchema }, + { SQLITE_DBCONFIG_STMT_SCANSTATUS, SQLITE_StmtScanStatus }, + { SQLITE_DBCONFIG_REVERSE_SCANORDER, SQLITE_ReverseOrder }, }; unsigned int i; rc = SQLITE_ERROR; /* IMP: R-42790-23372 */ @@ -177036,6 +184844,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){ } } va_end(ap); + sqlite3_mutex_leave(db->mutex); return rc; } @@ -177296,6 +185105,14 @@ static int sqlite3Close(sqlite3 *db, int forceZombie){ } #endif + while( db->pDbData ){ + DbClientData *p = db->pDbData; + db->pDbData = p->pNext; + assert( p->pData!=0 ); + if( p->xDestructor ) p->xDestructor(p->pData); + sqlite3_free(p); + } + /* Convert the connection into a zombie and then close it. 
*/ db->eOpenState = SQLITE_STATE_ZOMBIE; @@ -177620,6 +185437,7 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int rc){ case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break; case SQLITE_NOTICE_RECOVER_ROLLBACK: zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break; + case SQLITE_NOTICE_RBU: zName = "SQLITE_NOTICE_RBU"; break; case SQLITE_WARNING: zName = "SQLITE_WARNING"; break; case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break; case SQLITE_DONE: zName = "SQLITE_DONE"; break; @@ -177712,9 +185530,9 @@ static int sqliteDefaultBusyCallback( void *ptr, /* Database connection */ int count /* Number of times table has been busy */ ){ -#if SQLITE_OS_WIN || HAVE_USLEEP +#if SQLITE_OS_WIN || !defined(HAVE_NANOSLEEP) || HAVE_NANOSLEEP /* This case is for systems that have support for sleeping for fractions of - ** a second. Examples: All windows systems, unix systems with usleep() */ + ** a second. Examples: All windows systems, unix systems with nanosleep() */ static const u8 delays[] = { 1, 2, 5, 10, 15, 20, 25, 25, 25, 50, 50, 100 }; static const u8 totals[] = @@ -177849,7 +185667,9 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){ */ SQLITE_API void sqlite3_interrupt(sqlite3 *db){ #ifdef SQLITE_ENABLE_API_ARMOR - if( !sqlite3SafetyCheckOk(db) && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) ){ + if( !sqlite3SafetyCheckOk(db) + && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) + ){ (void)SQLITE_MISUSE_BKPT; return; } @@ -177857,6 +185677,21 @@ SQLITE_API void sqlite3_interrupt(sqlite3 *db){ AtomicStore(&db->u1.isInterrupted, 1); } +/* +** Return true or false depending on whether or not an interrupt is +** pending on connection db. +*/ +SQLITE_API int sqlite3_is_interrupted(sqlite3 *db){ +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) + && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) + ){ + (void)SQLITE_MISUSE_BKPT; + return 0; + } +#endif + return AtomicLoad(&db->u1.isInterrupted)!=0; +} /* ** This function is exactly the same as sqlite3_create_function(), except @@ -177895,13 +185730,13 @@ SQLITE_PRIVATE int sqlite3CreateFunc( assert( SQLITE_FUNC_CONSTANT==SQLITE_DETERMINISTIC ); assert( SQLITE_FUNC_DIRECT==SQLITE_DIRECTONLY ); extraFlags = enc & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY| - SQLITE_SUBTYPE|SQLITE_INNOCUOUS); + SQLITE_SUBTYPE|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE); enc &= (SQLITE_FUNC_ENCMASK|SQLITE_ANY); /* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But ** the meaning is inverted. So flip the bit. 
*/ assert( SQLITE_FUNC_UNSAFE==SQLITE_INNOCUOUS ); - extraFlags ^= SQLITE_FUNC_UNSAFE; + extraFlags ^= SQLITE_FUNC_UNSAFE; /* tag-20230109-1 */ #ifndef SQLITE_OMIT_UTF16 @@ -177919,11 +185754,11 @@ SQLITE_PRIVATE int sqlite3CreateFunc( case SQLITE_ANY: { int rc; rc = sqlite3CreateFunc(db, zFunctionName, nArg, - (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE, + (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1 */ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor); if( rc==SQLITE_OK ){ rc = sqlite3CreateFunc(db, zFunctionName, nArg, - (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE, + (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1*/ pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor); } if( rc!=SQLITE_OK ){ @@ -178172,7 +186007,7 @@ SQLITE_API int sqlite3_overload_function( rc = sqlite3FindFunction(db, zName, nArg, SQLITE_UTF8, 0)!=0; sqlite3_mutex_leave(db->mutex); if( rc ) return SQLITE_OK; - zCopy = sqlite3_mprintf(zName); + zCopy = sqlite3_mprintf("%s", zName); if( zCopy==0 ) return SQLITE_NOMEM; return sqlite3_create_function_v2(db, zName, nArg, SQLITE_UTF8, zCopy, sqlite3InvalidFunction, 0, 0, sqlite3_free); @@ -178352,6 +186187,12 @@ SQLITE_API void *sqlite3_preupdate_hook( void *pArg /* First callback argument */ ){ void *pRet; + +#ifdef SQLITE_ENABLE_API_ARMOR + if( db==0 ){ + return 0; + } +#endif sqlite3_mutex_enter(db->mutex); pRet = db->pPreUpdateArg; db->xPreUpdateCallback = xCallback; @@ -178498,7 +186339,7 @@ SQLITE_API int sqlite3_wal_checkpoint_v2( if( eMode<SQLITE_CHECKPOINT_PASSIVE || eMode>SQLITE_CHECKPOINT_TRUNCATE ){ /* EVIDENCE-OF: R-03996-12088 The M parameter must be a valid checkpoint ** mode: */ - return SQLITE_MISUSE; + return SQLITE_MISUSE_BKPT; } sqlite3_mutex_enter(db->mutex); @@ -178975,9 +186816,9 @@ SQLITE_PRIVATE int sqlite3ParseUri( assert( *pzErrMsg==0 ); - if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ - || sqlite3GlobalConfig.bOpenUri) /* IMP: R-51689-46548 */ - && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ + if( ((flags & SQLITE_OPEN_URI) /* IMP: R-48725-32206 */ + || AtomicLoad(&sqlite3GlobalConfig.bOpenUri)) /* IMP: R-51689-46548 */ + && nUri>=5 && memcmp(zUri, "file:", 5)==0 /* IMP: R-57884-37496 */ ){ char *zOpt; int eState; /* Parser state when parsing URI */ @@ -179372,7 +187213,7 @@ static int openDatabase( ** 0 off off ** ** Legacy behavior is 3 (double-quoted string literals are allowed anywhere) -** and so that is the default. But developers are encouranged to use +** and so that is the default. But developers are encouraged to use -DSQLITE_DQS=0 (best) or -DSQLITE_DQS=1 (second choice) if possible. 
*/ #if !defined(SQLITE_DQS) @@ -179420,6 +187261,9 @@ static int openDatabase( #endif #if defined(SQLITE_DEFAULT_LEGACY_ALTER_TABLE) | SQLITE_LegacyAlter +#endif +#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) + | SQLITE_StmtScanStatus #endif ; sqlite3HashInit(&db->aCollSeq); @@ -179443,6 +187287,19 @@ static int openDatabase( goto opendb_out; } +#if SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) + /* Process magic filenames ":localStorage:" and ":sessionStorage:" */ + if( zFilename && zFilename[0]==':' ){ + if( strcmp(zFilename, ":localStorage:")==0 ){ + zFilename = "file:local?vfs=kvvfs"; + flags |= SQLITE_OPEN_URI; + }else if( strcmp(zFilename, ":sessionStorage:")==0 ){ + zFilename = "file:session?vfs=kvvfs"; + flags |= SQLITE_OPEN_URI; + } + } +#endif /* SQLITE_OS_UNIX && defined(SQLITE_OS_KV_OPTIONAL) */ + /* Parse the filename/URI argument ** ** Only allow sensible combinations of bits in the flags argument. @@ -179473,6 +187330,12 @@ static int openDatabase( sqlite3_free(zErrMsg); goto opendb_out; } + assert( db->pVfs!=0 ); +#if SQLITE_OS_KV || defined(SQLITE_OS_KV_OPTIONAL) + if( sqlite3_stricmp(db->pVfs->zName, "kvvfs")==0 ){ + db->temp_store = 2; + } +#endif /* Open the backend database driver */ rc = sqlite3BtreeOpen(db->pVfs, zOpen, db, &db->aDb[0].pBt, 0, @@ -179762,6 +187625,69 @@ SQLITE_API int sqlite3_collation_needed16( } #endif /* SQLITE_OMIT_UTF16 */ +/* +** Find existing client data. +*/ +SQLITE_API void *sqlite3_get_clientdata(sqlite3 *db, const char *zName){ + DbClientData *p; + sqlite3_mutex_enter(db->mutex); + for(p=db->pDbData; p; p=p->pNext){ + if( strcmp(p->zName, zName)==0 ){ + void *pResult = p->pData; + sqlite3_mutex_leave(db->mutex); + return pResult; + } + } + sqlite3_mutex_leave(db->mutex); + return 0; +} + +/* +** Add new client data to a database connection. +*/ +SQLITE_API int sqlite3_set_clientdata( + sqlite3 *db, /* Attach client data to this connection */ + const char *zName, /* Name of the client data */ + void *pData, /* The client data itself */ + void (*xDestructor)(void*) /* Destructor */ +){ + DbClientData *p, **pp; + sqlite3_mutex_enter(db->mutex); + pp = &db->pDbData; + for(p=db->pDbData; p && strcmp(p->zName,zName); p=p->pNext){ + pp = &p->pNext; + } + if( p ){ + assert( p->pData!=0 ); + if( p->xDestructor ) p->xDestructor(p->pData); + if( pData==0 ){ + *pp = p->pNext; + sqlite3_free(p); + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; + } + }else if( pData==0 ){ + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; + }else{ + size_t n = strlen(zName); + p = sqlite3_malloc64( sizeof(DbClientData)+n+1 ); + if( p==0 ){ + if( xDestructor ) xDestructor(pData); + sqlite3_mutex_leave(db->mutex); + return SQLITE_NOMEM; + } + memcpy(p->zName, zName, n+1); + p->pNext = db->pDbData; + db->pDbData = p; + } + p->pData = pData; + p->xDestructor = xDestructor; + sqlite3_mutex_leave(db->mutex); + return SQLITE_OK; +} + + #ifndef SQLITE_OMIT_DEPRECATED /* ** This function is now an anachronism. It used to be used to recover from a @@ -179897,7 +187823,7 @@ SQLITE_API int sqlite3_table_column_metadata( /* Find the column for which info is requested */ if( zColumnName==0 ){ - /* Query for existance of table only */ + /* Query for existence of table only */ }else{ for(iCol=0; iCol<pTab->nCol; iCol++){ pCol = &pTab->aCol[iCol]; @@ -179978,7 +187904,7 @@ SQLITE_API int sqlite3_sleep(int ms){ /* This function works in milliseconds, but the underlying OsSleep() ** API uses microseconds. Hence the 1000's.
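/*
** [Editor's sketch — not part of the upstream diff.] Typical use of the
** sqlite3_set_clientdata()/sqlite3_get_clientdata() pair added above:
** attach named, destructor-managed state to a connection. The key
** "myapp-state" and the AppState type are hypothetical.
*/
#include <sqlite3.h>
#include <stdlib.h>

typedef struct AppState { int nQueries; } AppState;

static void app_state_destroy(void *p){ free(p); }

int attach_app_state(sqlite3 *db){
  AppState *p = calloc(1, sizeof(*p));
  if( p==0 ) return SQLITE_NOMEM;
  /* Per the implementation above, the destructor runs when the entry is
  ** overwritten or cleared (and when the connection closes). */
  return sqlite3_set_clientdata(db, "myapp-state", p, app_state_destroy);
}

void count_query(sqlite3 *db){
  AppState *p = (AppState*)sqlite3_get_clientdata(db, "myapp-state");
  if( p ) p->nQueries++;
}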
*/ - rc = (sqlite3OsSleep(pVfs, 1000*ms)/1000); + rc = (sqlite3OsSleep(pVfs, ms<0 ? 0 : 1000*ms)/1000); return rc; } @@ -180034,6 +187960,9 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo sqlite3BtreeSetPageSize(pBtree, 0, iNew, 0); } rc = SQLITE_OK; + }else if( op==SQLITE_FCNTL_RESET_CACHE ){ + sqlite3BtreeClearCache(pBtree); + rc = SQLITE_OK; }else{ int nSave = db->busyHandler.nBusy; rc = sqlite3OsFileControl(fd, op, pArg); @@ -180108,6 +188037,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){ } #endif + /* sqlite3_test_control(SQLITE_TESTCTRL_FK_NO_ACTION, sqlite3 *db, int b); + ** + ** If b is true, then activate the SQLITE_FkNoAction setting. If b is + ** false then clear that setting. If the SQLITE_FkNoAction setting is + ** enabled, all foreign key ON DELETE and ON UPDATE actions behave as if + ** they were NO ACTION, regardless of how they are defined. + ** + ** NB: One must usually run "PRAGMA writable_schema=RESET" after + ** using this test-control, before it will take full effect. Failing + ** to reset the schema can result in some unexpected behavior. + */ + case SQLITE_TESTCTRL_FK_NO_ACTION: { + sqlite3 *db = va_arg(ap, sqlite3*); + int b = va_arg(ap, int); + if( b ){ + db->flags |= SQLITE_FkNoAction; + }else{ + db->flags &= ~SQLITE_FkNoAction; + } + break; + } + /* ** sqlite3_test_control(BITVEC_TEST, size, program) ** @@ -180214,10 +188165,12 @@ SQLITE_API int sqlite3_test_control(int op, ...){ sqlite3ShowSrcList(0); sqlite3ShowWith(0); sqlite3ShowUpsert(0); +#ifndef SQLITE_OMIT_TRIGGER sqlite3ShowTriggerStep(0); sqlite3ShowTriggerStepList(0); sqlite3ShowTrigger(0); sqlite3ShowTriggerList(0); +#endif #ifndef SQLITE_OMIT_WINDOWFUNC sqlite3ShowWindow(0); sqlite3ShowWinFunc(0); @@ -180334,7 +188287,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){ ** formed and never corrupt. This flag is clear by default, indicating that ** database files might have arbitrary corruption. Setting the flag during ** testing causes certain assert() statements in the code to be activated - ** that demonstrat invariants on well-formed database files. + ** that demonstrate invariants on well-formed database files. */ case SQLITE_TESTCTRL_NEVER_CORRUPT: { sqlite3GlobalConfig.neverCorrupt = va_arg(ap, int); @@ -180488,7 +188441,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){ ** ** op==0 Store the current sqlite3TreeTrace in *ptr ** op==1 Set sqlite3TreeTrace to the value *ptr - ** op==3 Store the current sqlite3WhereTrace in *ptr + ** op==2 Store the current sqlite3WhereTrace in *ptr ** op==3 Set sqlite3WhereTrace to the value *ptr */ case SQLITE_TESTCTRL_TRACEFLAGS: { @@ -180524,6 +188477,23 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } +#if !defined(SQLITE_OMIT_WSD) + /* sqlite3_test_control(SQLITE_TESTCTRL_USELONGDOUBLE, int X); + ** + ** X<0 Make no changes to the bUseLongDouble. Just report value.
+ ** X==0 Disable bUseLongDouble + ** X==1 Enable bUseLongDouble + ** X>=2 Set bUseLongDouble to its default value for this platform + */ + case SQLITE_TESTCTRL_USELONGDOUBLE: { + int b = va_arg(ap, int); + if( b>=2 ) b = hasHighPrecisionDouble(b); + if( b>=0 ) sqlite3Config.bUseLongDouble = b>0; + rc = sqlite3Config.bUseLongDouble!=0; + break; + } +#endif + #if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) /* sqlite3_test_control(SQLITE_TESTCTRL_TUNE, id, *piValue) @@ -180554,6 +188524,28 @@ SQLITE_API int sqlite3_test_control(int op, ...){ break; } #endif + + /* sqlite3_test_control(SQLITE_TESTCTRL_JSON_SELFCHECK, &onOff); + ** + ** Activate or deactivate validation of JSONB that is generated from + ** text. Off by default, as the validation is slow. Validation is + ** only available if compiled using SQLITE_DEBUG. + ** + ** If onOff is initially 1, then turn it on. If onOff is initially + ** off, turn it off. If onOff is initially -1, then change onOff + ** to be the current setting. + */ + case SQLITE_TESTCTRL_JSON_SELFCHECK: { +#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_WSD) + int *pOnOff = va_arg(ap, int*); + if( *pOnOff<0 ){ + *pOnOff = sqlite3Config.bJsonSelfcheck; + }else{ + sqlite3Config.bJsonSelfcheck = (u8)((*pOnOff)&0xff); + } +#endif + break; + } } va_end(ap); #endif /* SQLITE_UNTESTABLE */ @@ -180594,7 +188586,7 @@ static char *appendText(char *p, const char *z){ ** Memory layout must be compatible with that generated by the pager ** and expected by sqlite3_uri_parameter() and databaseName(). */ -SQLITE_API char *sqlite3_create_filename( +SQLITE_API const char *sqlite3_create_filename( const char *zDatabase, const char *zJournal, const char *zWal, @@ -180630,10 +188622,10 @@ SQLITE_API char *sqlite3_create_filename( ** error to call this routine with any parameter other than a pointer ** previously obtained from sqlite3_create_filename() or a NULL pointer. */ -SQLITE_API void sqlite3_free_filename(char *p){ +SQLITE_API void sqlite3_free_filename(const char *p){ if( p==0 ) return; - p = (char*)databaseName(p); - sqlite3_free(p - 4); + p = databaseName(p); + sqlite3_free((char*)p - 4); } @@ -180824,7 +188816,7 @@ SQLITE_API int sqlite3_snapshot_get( } /* -** Open a read-transaction on the snapshot idendified by pSnapshot. +** Open a read-transaction on the snapshot identified by pSnapshot. 
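/*
** [Editor's sketch — not part of the upstream diff.] The hunk above changes
** sqlite3_create_filename()/sqlite3_free_filename() to traffic in
** "const char *". A VFS shim might build a database filename carrying
** extra URI parameters and later release it; the paths and parameter name
** below are hypothetical. azParam holds nParam key/value pairs.
*/
#include <sqlite3.h>

const char *make_shim_filename(void){
  static const char *azParam[] = { "shim_level", "3" };  /* one key/value pair */
  return sqlite3_create_filename(
      "/tmp/app.db",             /* database file */
      "/tmp/app.db-journal",     /* rollback journal */
      "/tmp/app.db-wal",         /* WAL file */
      1, azParam);
}

void release_shim_filename(const char *z){
  sqlite3_free_filename(z);      /* NULL-safe, per the implementation above */
}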
*/ SQLITE_API int sqlite3_snapshot_open( sqlite3 *db, @@ -180884,8 +188876,8 @@ SQLITE_API int sqlite3_snapshot_open( */ SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){ int rc = SQLITE_ERROR; - int iDb; #ifndef SQLITE_OMIT_WAL + int iDb; #ifdef SQLITE_ENABLE_API_ARMOR if( !sqlite3SafetyCheckOk(db) ){ @@ -180931,7 +188923,7 @@ SQLITE_API int sqlite3_compileoption_used(const char *zOptName){ int nOpt; const char **azCompileOpt; -#if SQLITE_ENABLE_API_ARMOR +#ifdef SQLITE_ENABLE_API_ARMOR if( zOptName==0 ){ (void)SQLITE_MISUSE_BKPT; return 0; @@ -181126,6 +189118,9 @@ SQLITE_API int sqlite3_unlock_notify( ){ int rc = SQLITE_OK; +#ifdef SQLITE_ENABLE_API_ARMOR + if( !sqlite3SafetyCheckOk(db) ) return SQLITE_MISUSE_BKPT; +#endif sqlite3_mutex_enter(db->mutex); enterMutex(); @@ -182147,6 +190142,7 @@ struct Fts3Table { int nPgsz; /* Page size for host database */ char *zSegmentsTbl; /* Name of %_segments table */ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ + int iSavepoint; /* ** The following array of hash tables is used to buffer pending index @@ -182532,6 +190528,10 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int); SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int); #endif +SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*); + +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); + #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */ @@ -182888,6 +190888,7 @@ static void fts3DeclareVtab(int *pRc, Fts3Table *p){ zLanguageid = (p->zLanguageid ? p->zLanguageid : "__langid"); sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(p->db, SQLITE_VTAB_INNOCUOUS); /* Create a list of user columns for the virtual table */ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); @@ -186137,6 +194138,8 @@ static int fts3RenameMethod( rc = sqlite3Fts3PendingTermsFlush(p); } + p->bIgnoreSavepoint = 1; + if( p->zContentTbl==0 ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", @@ -186164,6 +194167,8 @@ static int fts3RenameMethod( "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", p->zDb, p->zName, zName ); + + p->bIgnoreSavepoint = 0; return rc; } @@ -186174,12 +194179,28 @@ static int fts3RenameMethod( */ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; - UNUSED_PARAMETER(iSavepoint); - assert( ((Fts3Table *)pVtab)->inTransaction ); - assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint ); - TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); - if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ - rc = fts3SyncMethod(pVtab); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint<=iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + + if( pTab->bIgnoreSavepoint==0 ){ + if( fts3HashCount(&pTab->aIndex[0].hPending)>0 ){ + char *zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", + pTab->zDb, pTab->zName, pTab->zName + ); + if( zSql ){ + pTab->bIgnoreSavepoint = 1; + rc = sqlite3_exec(pTab->db, zSql, 0, 0, 0); + pTab->bIgnoreSavepoint = 0; + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } } return rc; } @@ -186190,12 +194211,11 @@ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** This is a no-op. 
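/*
** [Editor's sketch — not part of the upstream diff.] The reworked
** xSavepoint method above flushes pending FTS terms (via the special
** 'flush' insert) before recording a savepoint, so a ROLLBACK TO discards
** only terms buffered after that point. SQL-level effect, assuming the
** build enables FTS4; error handling elided:
*/
#include <sqlite3.h>

void fts_savepoint_demo(sqlite3 *db){
  sqlite3_exec(db, "CREATE VIRTUAL TABLE t USING fts4(body);", 0, 0, 0);
  sqlite3_exec(db, "BEGIN;", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t(body) VALUES('kept text');", 0, 0, 0);
  sqlite3_exec(db, "SAVEPOINT sp1;", 0, 0, 0);
  sqlite3_exec(db, "INSERT INTO t(body) VALUES('doomed text');", 0, 0, 0);
  /* Drops only 'doomed text'; 'kept text' survives the rollback */
  sqlite3_exec(db, "ROLLBACK TO sp1;", 0, 0, 0);
  sqlite3_exec(db, "COMMIT;", 0, 0, 0);
}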
*/ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); - UNUSED_PARAMETER(iSavepoint); - UNUSED_PARAMETER(pVtab); - assert( p->inTransaction ); - assert( p->mxSavepoint >= iSavepoint ); - TESTONLY( p->mxSavepoint = iSavepoint-1 ); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint >= iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint-1 ); + pTab->iSavepoint = iSavepoint; return SQLITE_OK; } @@ -186205,11 +194225,13 @@ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** Discard the contents of the pending terms table. */ static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ - Fts3Table *p = (Fts3Table*)pVtab; + Fts3Table *pTab = (Fts3Table*)pVtab; UNUSED_PARAMETER(iSavepoint); - assert( p->inTransaction ); - TESTONLY( p->mxSavepoint = iSavepoint ); - sqlite3Fts3PendingTermsClear(p); + assert( pTab->inTransaction ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + if( (iSavepoint+1)<=pTab->iSavepoint ){ + sqlite3Fts3PendingTermsClear(pTab); + } return SQLITE_OK; } @@ -186228,8 +194250,40 @@ static int fts3ShadowName(const char *zName){ return 0; } +/* +** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual +** table. +*/ +static int fts3IntegrityMethod( + sqlite3_vtab *pVtab, /* The virtual table to be checked */ + const char *zSchema, /* Name of schema in which pVtab lives */ + const char *zTabname, /* Name of the pVTab table */ + int isQuick, /* True if this is a quick_check */ + char **pzErr /* Write error message here */ +){ + Fts3Table *p = (Fts3Table*)pVtab; + int rc; + int bOk = 0; + + UNUSED_PARAMETER(isQuick); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); + assert( rc!=SQLITE_CORRUPT_VTAB || bOk==0 ); + if( rc!=SQLITE_OK && rc!=SQLITE_CORRUPT_VTAB ){ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS%d table %s.%s: %s", + p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); + }else if( bOk==0 ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", + p->bFts4 ? 4 : 3, zSchema, zTabname); + } + sqlite3Fts3SegmentsClose(p); + return SQLITE_OK; +} + + + static const sqlite3_module fts3Module = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts3CreateMethod, /* xConnect */ fts3ConnectMethod, /* xBestIndex */ fts3BestIndexMethod, @@ -186253,6 +194307,7 @@ static const sqlite3_module fts3Module = { /* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, /* xShadowName */ fts3ShadowName, + /* xIntegrity */ fts3IntegrityMethod, }; /* @@ -187535,9 +195590,8 @@ static void fts3EvalNextRow( Fts3Expr *pExpr, /* Expr. to advance to next matching row */ int *pRc /* IN/OUT: Error code */ ){ - if( *pRc==SQLITE_OK ){ + if( *pRc==SQLITE_OK && pExpr->bEof==0 ){ int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ - assert( pExpr->bEof==0 ); pExpr->bStart = 1; switch( pExpr->eType ){ @@ -188013,6 +196067,22 @@ static void fts3EvalUpdateCounts(Fts3Expr *pExpr, int nCol){ } } +/* +** This is an sqlite3Fts3ExprIterate() callback. If the Fts3Expr.aMI[] array +** has not yet been allocated, allocate and zero it. Otherwise, just zero +** it. 
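/*
** [Editor's sketch — not part of the upstream diff.] With iVersion bumped
** to 4 and the xIntegrity method above, "PRAGMA integrity_check" can now
** validate the FTS3/FTS4 inverted index itself, surfacing messages such as
** "malformed inverted index for FTS4 table ...". One way to read the
** report:
*/
#include <sqlite3.h>
#include <stdio.h>

static int print_check_row(void *pUnused, int nCol, char **azVal, char **azCol){
  (void)pUnused; (void)azCol;
  if( nCol>0 ) printf("integrity: %s\n", azVal[0] ? azVal[0] : "NULL");
  return 0;
}

void check_database(sqlite3 *db){
  sqlite3_exec(db, "PRAGMA integrity_check;", print_check_row, 0, 0);
}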
+*/ +static int fts3AllocateMSI(Fts3Expr *pExpr, int iPhrase, void *pCtx){ + Fts3Table *pTab = (Fts3Table*)pCtx; + UNUSED_PARAMETER(iPhrase); + if( pExpr->aMI==0 ){ + pExpr->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); + if( pExpr->aMI==0 ) return SQLITE_NOMEM; + } + memset(pExpr->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); + return SQLITE_OK; +} + /* ** Expression pExpr must be of type FTSQUERY_PHRASE. ** @@ -188034,7 +196104,6 @@ static int fts3EvalGatherStats( if( pExpr->aMI==0 ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; Fts3Expr *pRoot; /* Root of NEAR expression */ - Fts3Expr *p; /* Iterator used for several purposes */ sqlite3_int64 iPrevId = pCsr->iPrevId; sqlite3_int64 iDocid; @@ -188042,7 +196111,9 @@ static int fts3EvalGatherStats( /* Find the root of the NEAR expression */ pRoot = pExpr; - while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){ + while( pRoot->pParent + && (pRoot->pParent->eType==FTSQUERY_NEAR || pRoot->bDeferred) + ){ pRoot = pRoot->pParent; } iDocid = pRoot->iDocid; @@ -188050,14 +196121,8 @@ static int fts3EvalGatherStats( assert( pRoot->bStart ); /* Allocate space for the aMSI[] array of each FTSQUERY_PHRASE node */ - for(p=pRoot; p; p=p->pLeft){ - Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight); - assert( pE->aMI==0 ); - pE->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); - if( !pE->aMI ) return SQLITE_NOMEM; - memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); - } - + rc = sqlite3Fts3ExprIterate(pRoot, fts3AllocateMSI, (void*)pTab); + if( rc!=SQLITE_OK ) return rc; fts3EvalRestart(pCsr, pRoot, &rc); while( pCsr->isEof==0 && rc==SQLITE_OK ){ @@ -188213,6 +196278,7 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist( u8 bTreeEof = 0; Fts3Expr *p; /* Used to iterate from pExpr to root */ Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */ + Fts3Expr *pRun; /* Closest non-deferred ancestor of pNear */ int bMatch; /* Check if this phrase descends from an OR expression node. If not, @@ -188227,25 +196293,30 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist( if( p->bEof ) bTreeEof = 1; } if( bOr==0 ) return SQLITE_OK; + pRun = pNear; + while( pRun->bDeferred ){ + assert( pRun->pParent ); + pRun = pRun->pParent; + } /* This is the descendent of an OR node. In this case we cannot use ** an incremental phrase. Load the entire doclist for the phrase ** into memory in this case. 
*/ if( pPhrase->bIncr ){ - int bEofSave = pNear->bEof; - fts3EvalRestart(pCsr, pNear, &rc); - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); - if( bEofSave==0 && pNear->iDocid==iDocid ) break; + int bEofSave = pRun->bEof; + fts3EvalRestart(pCsr, pRun, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); + if( bEofSave==0 && pRun->iDocid==iDocid ) break; } assert( rc!=SQLITE_OK || pPhrase->bIncr==0 ); - if( rc==SQLITE_OK && pNear->bEof!=bEofSave ){ + if( rc==SQLITE_OK && pRun->bEof!=bEofSave ){ rc = FTS_CORRUPT_VTAB; } } if( bTreeEof ){ - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); } } if( rc!=SQLITE_OK ) return rc; @@ -188912,7 +196983,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitAux(sqlite3 *db){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ @@ -192478,7 +200550,8 @@ SQLITE_PRIVATE int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash, void(*xDestr 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ @@ -195161,16 +203234,18 @@ static int fts3MsrBufferData( char *pList, i64 nList ){ - if( nList>pMsr->nBuffer ){ + if( (nList+FTS3_NODE_PADDING)>pMsr->nBuffer ){ char *pNew; - pMsr->nBuffer = nList*2; - pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, pMsr->nBuffer); + int nNew = nList*2 + FTS3_NODE_PADDING; + pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, nNew); if( !pNew ) return SQLITE_NOMEM; pMsr->aBuffer = pNew; + pMsr->nBuffer = nNew; } assert( nList>0 ); memcpy(pMsr->aBuffer, pList, nList); + memset(&pMsr->aBuffer[nList], 0, FTS3_NODE_PADDING); return SQLITE_OK; } @@ -195817,7 +203892,6 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } - sqlite3Fts3PendingTermsClear(p); /* Determine the auto-incr-merge setting if unknown. If enabled, ** estimate the number of leaf blocks of content to be written @@ -195839,6 +203913,10 @@ SQLITE_PRIVATE int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ rc = sqlite3_reset(pStmt); } } + + if( rc==SQLITE_OK ){ + sqlite3Fts3PendingTermsClear(p); + } return rc; } @@ -196470,6 +204548,8 @@ static int fts3AppendToNode( blobGrowBuffer(pPrev, nTerm, &rc); if( rc!=SQLITE_OK ) return rc; + assert( pPrev!=0 ); + assert( pPrev->a!=0 ); nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); nSuffix = nTerm - nPrefix; @@ -196526,9 +204606,13 @@ static int fts3IncrmergeAppend( nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; /* If the current block is not empty, and if adding this term/doclist - ** to the current block would make it larger than Fts3Table.nNodeSize - ** bytes, write this block out to the database. */ - if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ + ** to the current block would make it larger than Fts3Table.nNodeSize bytes, + ** and if there is still room for another leaf page, write this block out to + ** the database. 
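/*
** [Editor's aside — not part of the upstream diff.] The fts3MsrBufferData()
** fix above over-allocates by FTS3_NODE_PADDING and zeroes the tail so that
** varint readers may safely look a few bytes past the logical end of a
** doclist. The same pattern in isolation, with hypothetical names:
*/
#include <stdlib.h>
#include <string.h>

#define PADDING 8   /* stand-in for FTS3_NODE_PADDING */

/* Copy nData bytes into *pzBuf, guaranteeing PADDING zero bytes after the
** payload. Returns 0 on success, -1 on OOM. */
static int buffer_data_padded(char **pzBuf, size_t *pnBuf,
                              const char *aData, size_t nData){
  if( nData+PADDING > *pnBuf ){
    size_t nNew = nData*2 + PADDING;     /* doubling keeps growth amortized */
    char *pNew = realloc(*pzBuf, nNew);
    if( pNew==0 ) return -1;
    *pzBuf = pNew;
    *pnBuf = nNew;
  }
  memcpy(*pzBuf, aData, nData);
  memset(*pzBuf + nData, 0, PADDING);    /* zeroed guard region */
  return 0;
}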
*/ + if( pLeaf->block.n>0 + && (pLeaf->block.n + nSpace)>p->nNodeSize + && pLeaf->iBlock < (pWriter->iStart + pWriter->nLeafEst) + ){ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); pWriter->nWork++; @@ -196839,6 +204923,7 @@ static int fts3IncrmergeLoad( for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ NodeReader reader; + memset(&reader, 0, sizeof(reader)); pNode = &pWriter->aNodeWriter[i]; if( pNode->block.a){ @@ -196859,7 +204944,7 @@ static int fts3IncrmergeLoad( rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc - ); + ); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; @@ -197709,7 +205794,7 @@ static u64 fts3ChecksumIndex( int rc; u64 cksum = 0; - assert( *pRc==SQLITE_OK ); + if( *pRc ) return 0; memset(&filter, 0, sizeof(filter)); memset(&csr, 0, sizeof(csr)); @@ -197776,7 +205861,7 @@ static u64 fts3ChecksumIndex( ** If an error occurs (e.g. an OOM or IO error), return an SQLite error ** code. The final value of *pbOk is undefined in this case. */ -static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ +SQLITE_PRIVATE int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ int rc = SQLITE_OK; /* Return code */ u64 cksum1 = 0; /* Checksum based on FTS index contents */ u64 cksum2 = 0; /* Checksum based on %_content contents */ @@ -197854,7 +205939,7 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (cksum1==cksum2); + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); return rc; } @@ -197894,7 +205979,7 @@ static int fts3DoIntegrityCheck( ){ int rc; int bOk = 0; - rc = fts3IntegrityCheck(p, &bOk); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB; return rc; } @@ -197924,8 +206009,11 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ rc = fts3DoIncrmerge(p, &zVal[6]); }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ rc = fts3DoAutoincrmerge(p, &zVal[10]); + }else if( nVal==5 && 0==sqlite3_strnicmp(zVal, "flush", 5) ){ + rc = sqlite3Fts3PendingTermsFlush(p); + } #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - }else{ + else{ int v; if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ v = atoi(&zVal[9]); @@ -197943,8 +206031,8 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v; rc = SQLITE_OK; } -#endif } +#endif return rc; } @@ -198352,7 +206440,7 @@ typedef sqlite3_int64 i64; /* -** Used as an fts3ExprIterate() context when loading phrase doclists to +** Used as an sqlite3Fts3ExprIterate() context when loading phrase doclists to ** Fts3Expr.aDoclist[]/nDoclist. */ typedef struct LoadDoclistCtx LoadDoclistCtx; @@ -198396,7 +206484,7 @@ struct SnippetFragment { }; /* -** This type is used as an fts3ExprIterate() context object while +** This type is used as an sqlite3Fts3ExprIterate() context object while ** accumulating the data returned by the matchinfo() function. */ typedef struct MatchInfo MatchInfo; @@ -198555,7 +206643,7 @@ static void fts3GetDeltaPosition(char **pp, i64 *piPos){ } /* -** Helper function for fts3ExprIterate() (see below). +** Helper function for sqlite3Fts3ExprIterate() (see below). 
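/*
** [Editor's sketch — not part of the upstream diff.] fts3SpecialInsert()
** above gains a 'flush' command next to the existing maintenance commands.
** All of them are issued as INSERTs on the table's hidden column (table
** name 't' assumed); error handling elided:
*/
#include <sqlite3.h>

void fts_maintenance(sqlite3 *db){
  sqlite3_exec(db, "INSERT INTO t(t) VALUES('flush');", 0, 0, 0);          /* new: force pending terms to disk */
  sqlite3_exec(db, "INSERT INTO t(t) VALUES('optimize');", 0, 0, 0);       /* merge all segments */
  sqlite3_exec(db, "INSERT INTO t(t) VALUES('merge=100,8');", 0, 0, 0);    /* bounded incremental merge */
  sqlite3_exec(db, "INSERT INTO t(t) VALUES('automerge=2');", 0, 0, 0);    /* enable automatic merging */
  sqlite3_exec(db, "INSERT INTO t(t) VALUES('integrity-check');", 0, 0, 0);
}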
*/ static int fts3ExprIterate2( Fts3Expr *pExpr, /* Expression to iterate phrases of */ @@ -198589,7 +206677,7 @@ static int fts3ExprIterate2( ** Otherwise, SQLITE_OK is returned after a callback has been made for ** all eligible phrase nodes. */ -static int fts3ExprIterate( +SQLITE_PRIVATE int sqlite3Fts3ExprIterate( Fts3Expr *pExpr, /* Expression to iterate phrases of */ int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ void *pCtx /* Second argument to pass to callback */ @@ -198598,10 +206686,9 @@ static int fts3ExprIterate( return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx); } - /* -** This is an fts3ExprIterate() callback used while loading the doclists -** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also +** This is an sqlite3Fts3ExprIterate() callback used while loading the +** doclists for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also ** fts3ExprLoadDoclists(). */ static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ @@ -198633,9 +206720,9 @@ static int fts3ExprLoadDoclists( int *pnToken /* OUT: Number of tokens in query */ ){ int rc; /* Return Code */ - LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ + LoadDoclistCtx sCtx = {0,0,0}; /* Context for sqlite3Fts3ExprIterate() */ sCtx.pCsr = pCsr; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx); + rc = sqlite3Fts3ExprIterate(pCsr->pExpr,fts3ExprLoadDoclistsCb,(void*)&sCtx); if( pnPhrase ) *pnPhrase = sCtx.nPhrase; if( pnToken ) *pnToken = sCtx.nToken; return rc; @@ -198648,7 +206735,7 @@ static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ } static int fts3ExprPhraseCount(Fts3Expr *pExpr){ int nPhrase = 0; - (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); return nPhrase; } @@ -198776,8 +206863,9 @@ static void fts3SnippetDetails( } /* -** This function is an fts3ExprIterate() callback used by fts3BestSnippet(). -** Each invocation populates an element of the SnippetIter.aPhrase[] array. +** This function is an sqlite3Fts3ExprIterate() callback used by +** fts3BestSnippet(). Each invocation populates an element of the +** SnippetIter.aPhrase[] array. */ static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ SnippetIter *p = (SnippetIter *)ctx; @@ -198867,7 +206955,9 @@ static int fts3BestSnippet( sIter.nSnippet = nSnippet; sIter.nPhrase = nList; sIter.iCurrent = -1; - rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter + ); if( rc==SQLITE_OK ){ /* Set the *pmSeen output variable. */ @@ -199228,10 +207318,10 @@ static int fts3ExprLHitGather( } /* -** fts3ExprIterate() callback used to collect the "global" matchinfo stats -** for a single query. +** sqlite3Fts3ExprIterate() callback used to collect the "global" matchinfo +** stats for a single query. ** -** fts3ExprIterate() callback to load the 'global' elements of a +** sqlite3Fts3ExprIterate() callback to load the 'global' elements of a ** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements ** of the matchinfo array that are constant for all rows returned by the ** current query. 
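/*
** [Editor's sketch — not part of the upstream diff.] The callbacks above,
** now driven by the renamed sqlite3Fts3ExprIterate(), implement the SQL
** matchinfo() function. From SQL, matchinfo(t, 'x') returns a blob of
** unsigned 32-bit integers: three values per phrase/column pair (hits in
** this row, hits in all rows, rows with at least one hit):
*/
#include <sqlite3.h>
#include <stdio.h>

void print_matchinfo_x(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  const char *zSql = "SELECT matchinfo(t, 'x') FROM t WHERE t MATCH 'sqlite'";
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ) return;
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    const unsigned int *a = sqlite3_column_blob(pStmt, 0);
    int i, n = sqlite3_column_bytes(pStmt, 0) / (int)sizeof(unsigned int);
    for(i=0; i+2<n; i+=3){
      printf("phrase/col %d: this-row=%u all-rows=%u docs=%u\n",
             i/3, a[i], a[i+1], a[i+2]);
    }
  }
  sqlite3_finalize(pStmt);
}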
@@ -199266,7 +207356,7 @@ } /* -** fts3ExprIterate() callback used to collect the "local" part of the +** sqlite3Fts3ExprIterate() callback used to collect the "local" part of the ** FTS3_MATCHINFO_HITS array. The local stats are those elements of the ** array that are different for each row returned by the query. */ @@ -199462,7 +207552,7 @@ static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){ **/ aIter = sqlite3Fts3MallocZero(sizeof(LcsIterator) * pCsr->nPhrase); if( !aIter ) return SQLITE_NOMEM; - (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); + (void)sqlite3Fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); for(i=0; i<pCsr->nPhrase; i++){ LcsIterator *pIter = &aIter[i]; @@ -199639,11 +207729,11 @@ static int fts3MatchinfoValues( rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc,0,0); if( rc!=SQLITE_OK ) break; } - rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); + rc = sqlite3Fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); sqlite3Fts3EvalTestDeferred(pCsr, &rc); if( rc!=SQLITE_OK ) break; } - (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); break; } } @@ -199866,7 +207956,7 @@ struct TermOffsetCtx { }; /* -** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets(). +** This function is an sqlite3Fts3ExprIterate() callback used by sqlite3Fts3Offsets(). */ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ TermOffsetCtx *p = (TermOffsetCtx *)ctx; @@ -199948,7 +208038,9 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets( */ sCtx.iCol = iCol; sCtx.iTerm = 0; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx + ); if( rc!=SQLITE_OK ) goto offsets_out; /* Retrieve the text stored in column iCol. If an SQL NULL is stored @@ -200861,59 +208953,242 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int eRemoveDiacritic){ ** ****************************************************************************** ** -** This SQLite JSON functions. +** SQLite JSON functions. ** ** This file began as an extension in ext/misc/json1.c in 2015. That ** extension proved so useful that it has now been moved into the core. ** -** For the time being, all JSON is stored as pure text. (We might add -** a JSONB type in the future which stores a binary encoding of JSON in -** a BLOB, but there is no support for JSONB in the current implementation. -** This implementation parses JSON text at 250 MB/s, so it is hard to see -** how JSONB might improve on that.) +** The original design stored all JSON as pure text, canonical RFC-8259. +** Support for JSON-5 extensions was added with version 3.42.0 (2023-05-16). +** All generated JSON text still conforms strictly to RFC-8259, but text +** with JSON-5 extensions is accepted as input. +** +** Beginning with version 3.45.0 (circa 2024-01-01), these routines also +** accept BLOB values that have JSON encoded using a binary representation +** called "JSONB". The name JSONB comes from PostgreSQL, however the on-disk +** format SQLite JSONB is completely different and incompatible with +** PostgreSQL JSONB. +** +** Decoding and interpreting JSONB is still O(N) where N is the size of +** the input, the same as text JSON. However, the constant of proportionality +** for JSONB is much smaller due to faster parsing.
The size of each +** element in JSONB is encoded in its header, so there is no need to search +** for delimiters using persnickety syntax rules. JSONB seems to be about +** 3x faster than text JSON as a result. JSONB also tends to be slightly +** smaller than text JSON, by 5% or 10%, but there are corner cases where +** JSONB can be slightly larger. So you are not far mistaken to say that +** a JSONB blob is the same size as the equivalent RFC-8259 text. +** +** +** THE JSONB ENCODING: +** +** Every JSON element is encoded in JSONB as a header and a payload. +** The header is between 1 and 9 bytes in size. The payload is zero +** or more bytes. +** +** The lower 4 bits of the first byte of the header determines the +** element type: +** +** 0: NULL +** 1: TRUE +** 2: FALSE +** 3: INT -- RFC-8259 integer literal +** 4: INT5 -- JSON5 integer literal +** 5: FLOAT -- RFC-8259 floating point literal +** 6: FLOAT5 -- JSON5 floating point literal +** 7: TEXT -- Text literal acceptable to both SQL and JSON +** 8: TEXTJ -- Text containing RFC-8259 escapes +** 9: TEXT5 -- Text containing JSON5 and/or RFC-8259 escapes +** 10: TEXTRAW -- Text containing unescaped syntax characters +** 11: ARRAY +** 12: OBJECT +** +** The other three possible values (13-15) are reserved for future +** enhancements. +** +** The upper 4 bits of the first byte determine the size of the header +** and sometimes also the size of the payload. If X is the first byte +** of the element and if X>>4 is between 0 and 11, then the payload +** will be that many bytes in size and the header is exactly one byte +** in size. The other four values for X>>4 (12-15) indicate that the header +** is more than one byte in size and that the payload size is determined +** by the remainder of the header, interpreted as an unsigned big-endian +** integer. +** +** Value of X>>4 Size integer Total header size +** ------------- -------------------- ----------------- +** 12 1 byte (0-255) 2 +** 13 2 byte (0-65535) 3 +** 14 4 byte (0-4294967295) 5 +** 15 8 byte (0-1.8e19) 9 +** +** The payload size need not be expressed in its minimal form. For example, +** if the payload size is 10, the size can be expressed in any of 5 different +** ways: (1) (X>>4)==10, (2) (X>>4)==12 followed by one 0x0a byte, +** (3) (X>>4)==13 followed by 0x00 and 0x0a, (4) (X>>4)==14 followed by +** 0x00 0x00 0x00 0x0a, or (5) (X>>4)==15 followed by 7 bytes of 0x00 and +** a single byte of 0x0a. The shorter forms are preferred, of course, but +** sometimes when generating JSONB, the payload size is not known in advance +** and it is convenient to reserve sufficient header space to cover the +** largest possible payload size and then come back later and patch up +** the size when it becomes known, resulting in a non-minimal encoding. +** +** The value (X>>4)==15 is not actually used in the current implementation +** (as SQLite is currently unable to handle BLOBs larger than about 2GB) +** but is included in the design to allow for future enhancements. +** +** The payload follows the header. NULL, TRUE, and FALSE have no payload and +** their payload size must always be zero. The payload for INT, INT5, +** FLOAT, FLOAT5, TEXT, TEXTJ, TEXT5, and TEXTRAW is text. Note that the +** "..." or '...' delimiters are omitted from the various text encodings. +** The payload for ARRAY and OBJECT is a list of additional elements that +** are the content for the array or object. The payload for an OBJECT +** must be an even number of elements.
The first element of each pair is +** the label and must be of type TEXT, TEXTJ, TEXT5, or TEXTRAW. +** +** A valid JSONB blob consists of a single element, as described above. +** Usually this will be an ARRAY or OBJECT element which has many more +** elements as its content. But the overall blob is just a single element. +** +** Input validation for JSONB blobs simply checks that the element type +** code is between 0 and 12 and that the total size of the element +** (header plus payload) is the same as the size of the BLOB. If those +** checks are true, the BLOB is assumed to be JSONB and processing continues. +** Errors are only raised if some other miscoding is discovered during +** processing. +** +** Additional information can be found in the doc/jsonb.md file of the +** canonical SQLite source tree. */ #ifndef SQLITE_OMIT_JSON /* #include "sqliteInt.h" */ +/* JSONB element types +*/ +#define JSONB_NULL 0 /* "null" */ +#define JSONB_TRUE 1 /* "true" */ +#define JSONB_FALSE 2 /* "false" */ +#define JSONB_INT 3 /* integer acceptable to JSON and SQL */ +#define JSONB_INT5 4 /* integer in 0x000 notation */ +#define JSONB_FLOAT 5 /* float acceptable to JSON and SQL */ +#define JSONB_FLOAT5 6 /* float with JSON5 extensions */ +#define JSONB_TEXT 7 /* Text compatible with both JSON and SQL */ +#define JSONB_TEXTJ 8 /* Text with JSON escapes */ +#define JSONB_TEXT5 9 /* Text with JSON-5 escape */ +#define JSONB_TEXTRAW 10 /* SQL text that needs escaping for JSON */ +#define JSONB_ARRAY 11 /* An array */ +#define JSONB_OBJECT 12 /* An object */ + +/* Human-readable names for the JSONB values. The index for each +** string must correspond to the JSONB_* integer above. +*/ +static const char * const jsonbType[] = { + "null", "true", "false", "integer", "integer", + "real", "real", "text", "text", "text", + "text", "array", "object", "", "", "", "" +}; + /* ** Growing our own isspace() routine this way is twice as fast as ** the library isspace() function, resulting in a 7% overall performance -** increase for the parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). +** increase for the text-JSON parser. (Ubuntu14.10 gcc 4.8.4 x64 with -Os). 
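/*
** [Editor's sketch — not part of the upstream diff.] Decoding the 1- to
** 9-byte JSONB element header described above. Returns the header size in
** bytes (0 if the blob is too short), with the element type in *pType and
** the payload size in *pnPayload:
*/
#include <stddef.h>

static size_t jsonb_header_decode(const unsigned char *a, size_t n,
                                  int *pType, unsigned long long *pnPayload){
  size_t i, nExtra;
  if( n<1 ) return 0;
  *pType = a[0] & 0x0f;              /* lower nibble: 0..12 per the list above */
  if( (a[0]>>4)<=11 ){               /* upper nibble 0..11 is the payload size */
    *pnPayload = a[0]>>4;
    return 1;
  }
  nExtra = (size_t)1 << ((a[0]>>4) - 12);   /* 12,13,14,15 -> 1,2,4,8 size bytes */
  if( n < 1+nExtra ) return 0;
  *pnPayload = 0;
  for(i=0; i<nExtra; i++){
    *pnPayload = (*pnPayload<<8) | a[1+i];  /* big-endian size integer */
  }
  return 1+nExtra;
}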
*/ static const char jsonIsSpace[] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, +}; +#define jsonIsspace(x) (jsonIsSpace[(unsigned char)x]) + +/* +** The set of all space characters recognized by jsonIsspace(). +** Useful as the second argument to strspn(). +*/ +static const char jsonSpaces[] = "\011\012\015\040"; + +/* +** Characters that are special to JSON. Control characters, +** '"' and '\\' and '\''. Actually, '\'' is not special to +** canonical JSON, but it is special in JSON-5, so we include +** it in the set of special characters. 
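/*
** [Editor's aside — not part of the upstream diff.] jsonSpaces[] above lets
** the parser skip a run of JSON whitespace with one library call rather
** than a byte-at-a-time jsonIsspace() loop:
*/
#include <string.h>

static const char zJsonSpaces[] = "\011\012\015\040";  /* tab, \n, \r, space */

/* Return a pointer to the first non-whitespace byte of z */
static const char *skip_json_space(const char *z){
  return z + strspn(z, zJsonSpaces);
}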
+*/ +static const char jsonIsOk[256] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }; -#define fast_isspace(x) (jsonIsSpace[(unsigned char)x]) - -#if !defined(SQLITE_DEBUG) && !defined(SQLITE_COVERAGE_TEST) -# define VVA(X) -#else -# define VVA(X) X -#endif /* Objects */ +typedef struct JsonCache JsonCache; typedef struct JsonString JsonString; -typedef struct JsonNode JsonNode; typedef struct JsonParse JsonParse; +/* +** Magic number used for the JSON parse cache in sqlite3_get_auxdata() +*/ +#define JSON_CACHE_ID (-429938) /* Cache entry */ +#define JSON_CACHE_SIZE 4 /* Max number of cache entries */ + +/* +** jsonUnescapeOneChar() returns this invalid code point if it encounters +** a syntax error. +*/ +#define JSON_INVALID_CHAR 0x99999 + +/* A cache mapping JSON text into JSONB blobs. +** +** Each cache entry is a JsonParse object with the following restrictions: +** +** * The bReadOnly flag must be set +** +** * The aBlob[] array must be owned by the JsonParse object. In other +** words, nBlobAlloc must be non-zero. +** +** * eEdit and delta must be zero. +** +** * zJson must be an RCStr. In other words bJsonIsRCStr must be true. +*/ +struct JsonCache { + sqlite3 *db; /* Database connection */ + int nUsed; /* Number of active entries in the cache */ + JsonParse *a[JSON_CACHE_SIZE]; /* One line for each cache entry */ +}; + /* An instance of this object represents a JSON string ** under construction. Really, this is a generic string accumulator ** that can be and is used to create strings other than JSON. +** +** If the generated string is longer than will fit into the zSpace[] buffer, +** then it will be an RCStr string. This aids with caching of large +** JSON strings. */ struct JsonString { sqlite3_context *pCtx; /* Function context - put error messages here */ @@ -200921,89 +209196,227 @@ struct JsonString { u64 nAlloc; /* Bytes of storage available in zBuf[] */ u64 nUsed; /* Bytes of zBuf[] currently used */ u8 bStatic; /* True if zBuf is static space */ - u8 bErr; /* True if an error has been encountered */ + u8 eErr; /* True if an error has been encountered */ char zSpace[100]; /* Initial static space */ }; -/* JSON type values -*/ -#define JSON_NULL 0 -#define JSON_TRUE 1 -#define JSON_FALSE 2 -#define JSON_INT 3 -#define JSON_REAL 4 -#define JSON_STRING 5 -#define JSON_ARRAY 6 -#define JSON_OBJECT 7 +/* Allowed values for JsonString.eErr */ +#define JSTRING_OOM 0x01 /* Out of memory */ +#define JSTRING_MALFORMED 0x02 /* Malformed JSONB */ +#define JSTRING_ERR 0x04 /* Error already sent to sqlite3_result */ -/* The "subtype" set for JSON values */ +/* The "subtype" set for text JSON values passed through using +** sqlite3_result_subtype() and sqlite3_value_subtype(). 
+*/ #define JSON_SUBTYPE 74 /* Ascii for "J" */ /* -** Names of the various JSON types: +** Bit values for the flags passed into various SQL function implementations +** via the sqlite3_user_data() value. */ -static const char * const jsonType[] = { - "null", "true", "false", "integer", "real", "text", "array", "object" -}; - -/* Bit values for the JsonNode.jnFlag field -*/ -#define JNODE_RAW 0x01 /* Content is raw, not JSON encoded */ -#define JNODE_ESCAPE 0x02 /* Content is text with \ escapes */ -#define JNODE_REMOVE 0x04 /* Do not output */ -#define JNODE_REPLACE 0x08 /* Replace with JsonNode.u.iReplace */ -#define JNODE_PATCH 0x10 /* Patch with JsonNode.u.pPatch */ -#define JNODE_APPEND 0x20 /* More ARRAY/OBJECT entries at u.iAppend */ -#define JNODE_LABEL 0x40 /* Is a label of an object */ - +#define JSON_JSON 0x01 /* Result is always JSON */ +#define JSON_SQL 0x02 /* Result is always SQL */ +#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ +#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +#define JSON_BLOB 0x08 /* Use the BLOB output format */ -/* A single node of parsed JSON -*/ -struct JsonNode { - u8 eType; /* One of the JSON_ type values */ - u8 jnFlags; /* JNODE flags */ - u8 eU; /* Which union element to use */ - u32 n; /* Bytes of content, or number of sub-nodes */ - union { - const char *zJContent; /* 1: Content for INT, REAL, and STRING */ - u32 iAppend; /* 2: More terms for ARRAY and OBJECT */ - u32 iKey; /* 3: Key for ARRAY objects in json_tree() */ - u32 iReplace; /* 4: Replacement content for JNODE_REPLACE */ - JsonNode *pPatch; /* 5: Node chain of patch for JNODE_PATCH */ - } u; -}; -/* A completely parsed JSON string +/* A parsed JSON value. Lifecycle: +** +** 1. JSON comes in and is parsed into a JSONB value in aBlob. The +** original text is stored in zJson. This step is skipped if the +** input is JSONB instead of text JSON. +** +** 2. The aBlob[] array is searched using the JSON path notation, if needed. +** +** 3. Zero or more changes are made to aBlob[] (via json_remove() or +** json_replace() or json_patch() or similar). +** +** 4. New JSON text is generated from the aBlob[] for output. This step +** is skipped if the function is one of the jsonb_* functions that +** returns JSONB instead of text JSON. */ struct JsonParse { - u32 nNode; /* Number of slots of aNode[] used */ - u32 nAlloc; /* Number of slots of aNode[] allocated */ - JsonNode *aNode; /* Array of nodes containing the parse */ - const char *zJson; /* Original JSON string */ - u32 *aUp; /* Index of parent of each node */ - u8 oom; /* Set to true if out of memory */ - u8 nErr; /* Number of errors seen */ - u16 iDepth; /* Nesting depth */ + u8 *aBlob; /* JSONB representation of JSON value */ + u32 nBlob; /* Bytes of aBlob[] actually used */ + u32 nBlobAlloc; /* Bytes allocated to aBlob[]. 0 if aBlob is external */ + char *zJson; /* Json text used for parsing */ + sqlite3 *db; /* The database connection to which this object belongs */ int nJson; /* Length of the zJson string in bytes */ - u32 iHold; /* Replace cache line with the lowest iHold value */ + u32 nJPRef; /* Number of references to this object */ + u32 iErr; /* Error location in zJson[] */ + u16 iDepth; /* Nesting depth */ + u8 nErr; /* Number of errors seen */ + u8 oom; /* Set to true if out of memory */ + u8 bJsonIsRCStr; /* True if zJson is an RCStr */ + u8 hasNonstd; /* True if input uses non-standard features like JSON5 */ + u8 bReadOnly; /* Do not modify. */ + /* Search and edit information. 
See jsonLookupStep() */ + u8 eEdit; /* Edit operation to apply */ + int delta; /* Size change due to the edit */ + u32 nIns; /* Number of bytes to insert */ + u32 iLabel; /* Location of label if search landed on an object value */ + u8 *aIns; /* Content to be inserted */ +}; + +/* Allowed values for JsonParse.eEdit */ +#define JEDIT_DEL 1 /* Delete if exists */ +#define JEDIT_REPL 2 /* Overwrite if exists */ +#define JEDIT_INS 3 /* Insert if not exists */ +#define JEDIT_SET 4 /* Insert or overwrite */ + /* ** Maximum nesting depth of JSON for this implementation. ** ** This limit is needed to avoid a stack overflow in the recursive -** descent parser. A depth of 2000 is far deeper than any sane JSON -** should go. +** descent parser. A depth of 1000 is far deeper than any sane JSON +** should go. Historical note: This limit was 2000 prior to version 3.42.0 +*/ +#ifndef SQLITE_JSON_MAX_DEPTH +# define JSON_MAX_DEPTH 1000 +#else +# define JSON_MAX_DEPTH SQLITE_JSON_MAX_DEPTH +#endif + +/* +** Allowed values for the flgs argument to jsonParseFuncArg(); +*/ +#define JSON_EDITABLE 0x01 /* Generate a writable JsonParse object */ +#define JSON_KEEPERROR 0x02 /* Return non-NULL even if there is an error */ + +/************************************************************************** +** Forward references +**************************************************************************/ +static void jsonReturnStringAsBlob(JsonString*); +static int jsonFuncArgMightBeBinary(sqlite3_value *pJson); +static u32 jsonTranslateBlobToText(const JsonParse*,u32,JsonString*); +static void jsonReturnParse(sqlite3_context*,JsonParse*); +static JsonParse *jsonParseFuncArg(sqlite3_context*,sqlite3_value*,u32); +static void jsonParseFree(JsonParse*); +static u32 jsonbPayloadSize(const JsonParse*, u32, u32*); +static u32 jsonUnescapeOneChar(const char*, u32, u32*); + +/************************************************************************** +** Utility routines for dealing with JsonCache objects +**************************************************************************/ + +/* +** Free a JsonCache object. +*/ +static void jsonCacheDelete(JsonCache *p){ + int i; + for(i=0; i<p->nUsed; i++){ + jsonParseFree(p->a[i]); + } + sqlite3DbFree(p->db, p); +} +static void jsonCacheDeleteGeneric(void *p){ + jsonCacheDelete((JsonCache*)p); +} + +/* +** Insert a new entry into the cache. If the cache is full, expel +** the least recently used entry. Return SQLITE_OK on success or a +** result code otherwise. +** +** Cache entries are stored in age order, oldest first.
+*/ +static int jsonCacheInsert( + sqlite3_context *ctx, /* The SQL statement context holding the cache */ + JsonParse *pParse /* The parse object to be added to the cache */ +){ + JsonCache *p; + + assert( pParse->zJson!=0 ); + assert( pParse->bJsonIsRCStr ); + assert( pParse->delta==0 ); + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ){ + sqlite3 *db = sqlite3_context_db_handle(ctx); + p = sqlite3DbMallocZero(db, sizeof(*p)); + if( p==0 ) return SQLITE_NOMEM; + p->db = db; + sqlite3_set_auxdata(ctx, JSON_CACHE_ID, p, jsonCacheDeleteGeneric); + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ) return SQLITE_NOMEM; + } + if( p->nUsed >= JSON_CACHE_SIZE ){ + jsonParseFree(p->a[0]); + memmove(p->a, &p->a[1], (JSON_CACHE_SIZE-1)*sizeof(p->a[0])); + p->nUsed = JSON_CACHE_SIZE-1; + } + assert( pParse->nBlobAlloc>0 ); + pParse->eEdit = 0; + pParse->nJPRef++; + pParse->bReadOnly = 1; + p->a[p->nUsed] = pParse; + p->nUsed++; + return SQLITE_OK; +} + +/* +** Search for a cached translation of the json text supplied by pArg. Return +** the JsonParse object if found. Return NULL if not found. +** +** When a match is found, the matching entry is moved to become the +** most-recently used entry if it isn't so already. +** +** The JsonParse object returned still belongs to the Cache and might +** be deleted at any moment. If the caller wants the JsonParse to +** linger, it needs to increment the nJPRef reference counter. */ -#define JSON_MAX_DEPTH 2000 +static JsonParse *jsonCacheSearch( + sqlite3_context *ctx, /* The SQL statement context holding the cache */ + sqlite3_value *pArg /* Function argument containing SQL text */ +){ + JsonCache *p; + int i; + const char *zJson; + int nJson; + + if( sqlite3_value_type(pArg)!=SQLITE_TEXT ){ + return 0; + } + zJson = (const char*)sqlite3_value_text(pArg); + if( zJson==0 ) return 0; + nJson = sqlite3_value_bytes(pArg); + + p = sqlite3_get_auxdata(ctx, JSON_CACHE_ID); + if( p==0 ){ + return 0; + } + for(i=0; i<p->nUsed; i++){ + if( p->a[i]->zJson==zJson ) break; + } + if( i>=p->nUsed ){ + for(i=0; i<p->nUsed; i++){ + if( p->a[i]->nJson!=nJson ) continue; + if( memcmp(p->a[i]->zJson, zJson, nJson)==0 ) break; + } + } + if( i<p->nUsed ){ + if( i<p->nUsed-1 ){ + /* Make the matching entry the most recently used entry */ + JsonParse *tmp = p->a[i]; + memmove(&p->a[i], &p->a[i+1], (p->nUsed-i-1)*sizeof(tmp)); + p->a[p->nUsed-1] = tmp; + i = p->nUsed - 1; + } + assert( p->a[i]->delta==0 ); + return p->a[i]; + }else{ + return 0; + } +} /************************************************************************** ** Utility routines for dealing with JsonString objects **************************************************************************/ -/* Set the JsonString object to an empty string +/* Turn uninitialized bulk memory into a valid JsonString object +** holding a zero-length string. */ -static void jsonZero(JsonString *p){ +static void jsonStringZero(JsonString *p){ p->zBuf = p->zSpace; p->nAlloc = sizeof(p->zSpace); p->nUsed = 0; @@ -201012,53 +209425,51 @@ static void jsonZero(JsonString *p){ /* Initialize the JsonString object */ -static void jsonInit(JsonString *p, sqlite3_context *pCtx){ +static void jsonStringInit(JsonString *p, sqlite3_context *pCtx){ p->pCtx = pCtx; - p->bErr = 0; - jsonZero(p); + p->eErr = 0; + jsonStringZero(p); } - /* Free all allocated memory and reset the JsonString object back to its ** initial state.
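/*
** [Editor's sketch — not part of the upstream diff.] The cache above hangs
** off sqlite3_get_auxdata()/sqlite3_set_auxdata() under the core's private
** negative id JSON_CACHE_ID. Application code conventionally uses the
** argument index (here 0) for the same trick: caching an expensively
** "compiled" first argument across rows. compile_pattern() and Compiled
** are hypothetical.
*/
#include <sqlite3.h>
#include <stdlib.h>

typedef struct Compiled Compiled;              /* hypothetical compiled form */
extern Compiled *compile_pattern(const char*); /* hypothetical; returns malloc'd object */

static void compiled_free(void *p){ free(p); }

static void my_match_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  Compiled *p = (Compiled*)sqlite3_get_auxdata(ctx, 0);
  if( p==0 ){
    p = compile_pattern((const char*)sqlite3_value_text(argv[0]));
    if( p==0 ){ sqlite3_result_error_nomem(ctx); return; }
    /* SQLite owns p from here; compiled_free runs on statement finalize
    ** or when the slot is overwritten */
    sqlite3_set_auxdata(ctx, 0, p, compiled_free);
  }
  (void)argc;
  sqlite3_result_int(ctx, 1);   /* real code would match argv[1] against p */
}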
*/ -static void jsonStringReset(JsonString *p){ - if( !p->bStatic ) sqlite3_free(p->zBuf); - jsonZero(p); +static void jsonStringReset(JsonString *p){ + if( !p->bStatic ) sqlite3RCStrUnref(p->zBuf); + jsonStringZero(p); } - /* Report an out-of-memory (OOM) condition */ -static void jsonOom(JsonString *p){ - p->bErr = 1; - sqlite3_result_error_nomem(p->pCtx); - jsonReset(p); +static void jsonStringOom(JsonString *p){ + p->eErr |= JSTRING_OOM; + if( p->pCtx ) sqlite3_result_error_nomem(p->pCtx); + jsonStringReset(p); } /* Enlarge pJson->zBuf so that it can hold at least N more bytes. ** Return zero on success. Return non-zero on an OOM error */ -static int jsonGrow(JsonString *p, u32 N){ +static int jsonStringGrow(JsonString *p, u32 N){ u64 nTotal = N<p->nAlloc ? p->nAlloc*2 : p->nAlloc+N+10; char *zNew; if( p->bStatic ){ - if( p->bErr ) return 1; - zNew = sqlite3_malloc64(nTotal); + if( p->eErr ) return 1; + zNew = sqlite3RCStrNew(nTotal); if( zNew==0 ){ - jsonOom(p); + jsonStringOom(p); return SQLITE_NOMEM; } memcpy(zNew, p->zBuf, (size_t)p->nUsed); p->zBuf = zNew; p->bStatic = 0; }else{ - zNew = sqlite3_realloc64(p->zBuf, nTotal); - if( zNew==0 ){ - jsonOom(p); + p->zBuf = sqlite3RCStrResize(p->zBuf, nTotal); + if( p->zBuf==0 ){ + p->eErr |= JSTRING_OOM; + jsonStringZero(p); return SQLITE_NOMEM; } - p->zBuf = zNew; } p->nAlloc = nTotal; return SQLITE_OK; @@ -201066,18 +209477,41 @@ static int jsonGrow(JsonString *p, u32 N){ /* Append N bytes from zIn onto the end of the JsonString string. */ -static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){ - if( N==0 ) return; - if( (N+p->nUsed >= p->nAlloc) && jsonGrow(p,N)!=0 ) return; +static SQLITE_NOINLINE void jsonStringExpandAndAppend( + JsonString *p, + const char *zIn, + u32 N +){ + assert( N>0 ); + if( jsonStringGrow(p,N) ) return; memcpy(p->zBuf+p->nUsed, zIn, N); p->nUsed += N; } +static void jsonAppendRaw(JsonString *p, const char *zIn, u32 N){ + if( N==0 ) return; + if( N+p->nUsed >= p->nAlloc ){ + jsonStringExpandAndAppend(p,zIn,N); + }else{ + memcpy(p->zBuf+p->nUsed, zIn, N); + p->nUsed += N; + } +} +static void jsonAppendRawNZ(JsonString *p, const char *zIn, u32 N){ + assert( N>0 ); + if( N+p->nUsed >= p->nAlloc ){ + jsonStringExpandAndAppend(p,zIn,N); + }else{ + memcpy(p->zBuf+p->nUsed, zIn, N); + p->nUsed += N; + } +} + /* Append formatted text (not to exceed N bytes) to the JsonString. */ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ va_list ap; - if( (p->nUsed + N >= p->nAlloc) && jsonGrow(p, N) ) return; + if( (p->nUsed + N >= p->nAlloc) && jsonStringGrow(p, N) ) return; va_start(ap, zFormat); sqlite3_vsnprintf(N, p->zBuf+p->nUsed, zFormat, ap); va_end(ap); @@ -201086,10 +209520,38 @@ static void jsonPrintf(int N, JsonString *p, const char *zFormat, ...){ /* Append a single character */ -static void jsonAppendChar(JsonString *p, char c){ - if( p->nUsed>=p->nAlloc && jsonGrow(p,1)!=0 ) return; +static SQLITE_NOINLINE void jsonAppendCharExpand(JsonString *p, char c){ + if( jsonStringGrow(p,1) ) return; p->zBuf[p->nUsed++] = c; } +static void jsonAppendChar(JsonString *p, char c){ + if( p->nUsed>=p->nAlloc ){ + jsonAppendCharExpand(p,c); + }else{ + p->zBuf[p->nUsed++] = c; + } +} + +/* Remove a single character from the end of the string +*/ +static void jsonStringTrimOneChar(JsonString *p){ + if( p->eErr==0 ){ + assert( p->nUsed>0 ); + p->nUsed--; + } +} + + +/* Make sure there is a zero terminator on p->zBuf[] +** +** Return true on success. Return false if an OOM prevents this +** from happening.
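/*
** [Editor's aside — not part of the upstream diff.] jsonStringGrow() above
** doubles the allocation once the string outgrows its 100-byte stack
** buffer, so n small appends copy O(n) bytes in total. The accumulator
** pattern reduced to its essentials, with hypothetical names:
*/
#include <stdlib.h>
#include <string.h>

typedef struct Accum {
  char *z;            /* current buffer (zSpace at first) */
  size_t nUsed;       /* bytes used */
  size_t nAlloc;      /* bytes available */
  char zSpace[100];   /* initial static space, as in JsonString */
} Accum;

static void accum_init(Accum *p){
  p->z = p->zSpace; p->nUsed = 0; p->nAlloc = sizeof(p->zSpace);
}

static int accum_append(Accum *p, const char *zIn, size_t N){
  if( p->nUsed + N > p->nAlloc ){
    size_t nNew = N < p->nAlloc ? p->nAlloc*2 : p->nAlloc + N + 10;
    char *zNew = (p->z==p->zSpace) ? malloc(nNew) : realloc(p->z, nNew);
    if( zNew==0 ) return -1;
    if( p->z==p->zSpace ) memcpy(zNew, p->zSpace, p->nUsed);
    p->z = zNew; p->nAlloc = nNew;
  }
  memcpy(p->z + p->nUsed, zIn, N);
  p->nUsed += N;
  return 0;
}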
+*/ +static int jsonStringTerminate(JsonString *p){ + jsonAppendChar(p, 0); + jsonStringTrimOneChar(p); + return p->eErr==0; +} /* Append a comma separator to the output buffer, if the previous ** character is not '[' or '{'. @@ -201098,25 +209560,76 @@ static void jsonAppendSeparator(JsonString *p){ char c; if( p->nUsed==0 ) return; c = p->zBuf[p->nUsed-1]; - if( c!='[' && c!='{' ) jsonAppendChar(p, ','); + if( c=='[' || c=='{' ) return; + jsonAppendChar(p, ','); } /* Append the N-byte string in zIn to the end of the JsonString string -** under construction. Enclose the string in "..." and escape -** any double-quotes or backslash characters contained within the +** under construction. Enclose the string in double-quotes ("...") and +** escape any double-quotes or backslash characters contained within the ** string. +** +** This routine is a high-runner. There is a measurable performance +** increase associated with unwinding the jsonIsOk[] loop. */ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ - u32 i; - if( zIn==0 || ((N+p->nUsed+2 >= p->nAlloc) && jsonGrow(p,N+2)!=0) ) return; + u32 k; + u8 c; + const u8 *z = (const u8*)zIn; + if( z==0 ) return; + if( (N+p->nUsed+2 >= p->nAlloc) && jsonStringGrow(p,N+2)!=0 ) return; p->zBuf[p->nUsed++] = '"'; - for(i=0; i=N ){ + while( k=N ){ + if( k>0 ){ + memcpy(&p->zBuf[p->nUsed], z, k); + p->nUsed += k; + } + break; + } + if( k>0 ){ + memcpy(&p->zBuf[p->nUsed], z, k); + p->nUsed += k; + z += k; + N -= k; + } + c = z[0]; if( c=='"' || c=='\\' ){ json_simple_escape: - if( (p->nUsed+N+3-i > p->nAlloc) && jsonGrow(p,N+3-i)!=0 ) return; + if( (p->nUsed+N+3 > p->nAlloc) && jsonStringGrow(p,N+3)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; - }else if( c<=0x1f ){ + p->zBuf[p->nUsed++] = c; + }else if( c=='\'' ){ + p->zBuf[p->nUsed++] = c; + }else{ static const char aSpecial[] = { 0, 0, 0, 0, 0, 0, 0, 0, 'b', 't', 'n', 0, 'f', 'r', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 @@ -201127,39 +209640,44 @@ static void jsonAppendString(JsonString *p, const char *zIn, u32 N){ assert( aSpecial['\n']=='n' ); assert( aSpecial['\r']=='r' ); assert( aSpecial['\t']=='t' ); + assert( c>=0 && cnUsed+N+7+i > p->nAlloc) && jsonGrow(p,N+7-i)!=0 ) return; + if( (p->nUsed+N+7 > p->nAlloc) && jsonStringGrow(p,N+7)!=0 ) return; p->zBuf[p->nUsed++] = '\\'; p->zBuf[p->nUsed++] = 'u'; p->zBuf[p->nUsed++] = '0'; p->zBuf[p->nUsed++] = '0'; - p->zBuf[p->nUsed++] = '0' + (c>>4); - c = "0123456789abcdef"[c&0xf]; + p->zBuf[p->nUsed++] = "0123456789abcdef"[c>>4]; + p->zBuf[p->nUsed++] = "0123456789abcdef"[c&0xf]; } - p->zBuf[p->nUsed++] = c; + z++; + N--; } p->zBuf[p->nUsed++] = '"'; assert( p->nUsednAlloc ); } /* -** Append a function parameter value to the JSON string under -** construction. +** Append an sqlite3_value (such as a function parameter) to the JSON +** string under construction in p. 
*/ -static void jsonAppendValue( +static void jsonAppendSqlValue( JsonString *p, /* Append to this JSON string */ sqlite3_value *pValue /* Value to append */ ){ switch( sqlite3_value_type(pValue) ){ case SQLITE_NULL: { - jsonAppendRaw(p, "null", 4); + jsonAppendRawNZ(p, "null", 4); break; } - case SQLITE_INTEGER: case SQLITE_FLOAT: { + jsonPrintf(100, p, "%!0.15g", sqlite3_value_double(pValue)); + break; + } + case SQLITE_INTEGER: { const char *z = (const char*)sqlite3_value_text(pValue); u32 n = (u32)sqlite3_value_bytes(pValue); jsonAppendRaw(p, z, n); @@ -201176,184 +209694,127 @@ static void jsonAppendValue( break; } default: { - if( p->bErr==0 ){ + if( jsonFuncArgMightBeBinary(pValue) ){ + JsonParse px; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(pValue); + px.nBlob = sqlite3_value_bytes(pValue); + jsonTranslateBlobToText(&px, 0, p); + }else if( p->eErr==0 ){ sqlite3_result_error(p->pCtx, "JSON cannot hold BLOB values", -1); - p->bErr = 2; - jsonReset(p); + p->eErr = JSTRING_ERR; + jsonStringReset(p); } break; } } } - -/* Make the JSON in p the result of the SQL function. +/* Make the text in p (which is probably a generated JSON text string) +** the result of the SQL function. +** +** The JsonString is reset. +** +** If pParse and ctx are both non-NULL, then the SQL string in p is +** loaded into the zJson field of the pParse object as a RCStr and the +** pParse is added to the cache. */ -static void jsonResult(JsonString *p){ - if( p->bErr==0 ){ - sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, - p->bStatic ? SQLITE_TRANSIENT : sqlite3_free, - SQLITE_UTF8); - jsonZero(p); +static void jsonReturnString( + JsonString *p, /* String to return */ + JsonParse *pParse, /* JSONB source or NULL */ + sqlite3_context *ctx /* Where to cache */ +){ + assert( (pParse!=0)==(ctx!=0) ); + assert( ctx==0 || ctx==p->pCtx ); + if( p->eErr==0 ){ + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(p->pCtx)); + if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(p); + }else if( p->bStatic ){ + sqlite3_result_text64(p->pCtx, p->zBuf, p->nUsed, + SQLITE_TRANSIENT, SQLITE_UTF8); + }else if( jsonStringTerminate(p) ){ + if( pParse && pParse->bJsonIsRCStr==0 && pParse->nBlobAlloc>0 ){ + int rc; + pParse->zJson = sqlite3RCStrRef(p->zBuf); + pParse->nJson = p->nUsed; + pParse->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, pParse); + if( rc==SQLITE_NOMEM ){ + sqlite3_result_error_nomem(ctx); + jsonStringReset(p); + return; + } + } + sqlite3_result_text64(p->pCtx, sqlite3RCStrRef(p->zBuf), p->nUsed, + sqlite3RCStrUnref, + SQLITE_UTF8); + }else{ + sqlite3_result_error_nomem(p->pCtx); + } + }else if( p->eErr & JSTRING_OOM ){ + sqlite3_result_error_nomem(p->pCtx); + }else if( p->eErr & JSTRING_MALFORMED ){ + sqlite3_result_error(p->pCtx, "malformed JSON", -1); } - assert( p->bStatic ); + jsonStringReset(p); } /************************************************************************** -** Utility routines for dealing with JsonNode and JsonParse objects +** Utility routines for dealing with JsonParse objects **************************************************************************/ -/* -** Return the number of consecutive JsonNode slots need to represent -** the parsed JSON at pNode. The minimum answer is 1. For ARRAY and -** OBJECT types, the number might be larger. -** -** Appended elements are not counted. The value returned is the number -** by which the JsonNode counter should increment in order to go to the -** next peer value. 
-*/ -static u32 jsonNodeSize(JsonNode *pNode){ - return pNode->eType>=JSON_ARRAY ? pNode->n+1 : 1; -} - /* ** Reclaim all memory allocated by a JsonParse object. But do not ** delete the JsonParse object itself. */ static void jsonParseReset(JsonParse *pParse){ - sqlite3_free(pParse->aNode); - pParse->aNode = 0; - pParse->nNode = 0; - pParse->nAlloc = 0; - sqlite3_free(pParse->aUp); - pParse->aUp = 0; + assert( pParse->nJPRef<=1 ); + if( pParse->bJsonIsRCStr ){ + sqlite3RCStrUnref(pParse->zJson); + pParse->zJson = 0; + pParse->nJson = 0; + pParse->bJsonIsRCStr = 0; + } + if( pParse->nBlobAlloc ){ + sqlite3DbFree(pParse->db, pParse->aBlob); + pParse->aBlob = 0; + pParse->nBlob = 0; + pParse->nBlobAlloc = 0; + } } /* -** Free a JsonParse object that was obtained from sqlite3_malloc(). +** Decrement the reference count on the JsonParse object. When the +** count reaches zero, free the object. */ static void jsonParseFree(JsonParse *pParse){ - jsonParseReset(pParse); - sqlite3_free(pParse); -} - -/* -** Convert the JsonNode pNode into a pure JSON string and -** append to pOut. Subsubstructure is also included. Return -** the number of JsonNode objects that are encoded. -*/ -static void jsonRenderNode( - JsonNode *pNode, /* The node to render */ - JsonString *pOut, /* Write JSON here */ - sqlite3_value **aReplace /* Replacement values */ -){ - assert( pNode!=0 ); - if( pNode->jnFlags & (JNODE_REPLACE|JNODE_PATCH) ){ - if( (pNode->jnFlags & JNODE_REPLACE)!=0 && ALWAYS(aReplace!=0) ){ - assert( pNode->eU==4 ); - jsonAppendValue(pOut, aReplace[pNode->u.iReplace]); - return; - } - assert( pNode->eU==5 ); - pNode = pNode->u.pPatch; - } - switch( pNode->eType ){ - default: { - assert( pNode->eType==JSON_NULL ); - jsonAppendRaw(pOut, "null", 4); - break; - } - case JSON_TRUE: { - jsonAppendRaw(pOut, "true", 4); - break; - } - case JSON_FALSE: { - jsonAppendRaw(pOut, "false", 5); - break; - } - case JSON_STRING: { - if( pNode->jnFlags & JNODE_RAW ){ - assert( pNode->eU==1 ); - jsonAppendString(pOut, pNode->u.zJContent, pNode->n); - break; - } - /* no break */ deliberate_fall_through - } - case JSON_REAL: - case JSON_INT: { - assert( pNode->eU==1 ); - jsonAppendRaw(pOut, pNode->u.zJContent, pNode->n); - break; - } - case JSON_ARRAY: { - u32 j = 1; - jsonAppendChar(pOut, '['); - for(;;){ - while( j<=pNode->n ){ - if( (pNode[j].jnFlags & JNODE_REMOVE)==0 ){ - jsonAppendSeparator(pOut); - jsonRenderNode(&pNode[j], pOut, aReplace); - } - j += jsonNodeSize(&pNode[j]); - } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - assert( pNode->eU==2 ); - pNode = &pNode[pNode->u.iAppend]; - j = 1; - } - jsonAppendChar(pOut, ']'); - break; - } - case JSON_OBJECT: { - u32 j = 1; - jsonAppendChar(pOut, '{'); - for(;;){ - while( j<=pNode->n ){ - if( (pNode[j+1].jnFlags & JNODE_REMOVE)==0 ){ - jsonAppendSeparator(pOut); - jsonRenderNode(&pNode[j], pOut, aReplace); - jsonAppendChar(pOut, ':'); - jsonRenderNode(&pNode[j+1], pOut, aReplace); - } - j += 1 + jsonNodeSize(&pNode[j+1]); - } - if( (pNode->jnFlags & JNODE_APPEND)==0 ) break; - assert( pNode->eU==2 ); - pNode = &pNode[pNode->u.iAppend]; - j = 1; - } - jsonAppendChar(pOut, '}'); - break; + if( pParse ){ + if( pParse->nJPRef>1 ){ + pParse->nJPRef--; + }else{ + jsonParseReset(pParse); + sqlite3DbFree(pParse->db, pParse); } } } -/* -** Return a JsonNode and all its descendents as a JSON string. 
-*/ -static void jsonReturnJson( - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx, /* Return value for this function */ - sqlite3_value **aReplace /* Array of replacement values */ -){ - JsonString s; - jsonInit(&s, pCtx); - jsonRenderNode(pNode, &s, aReplace); - jsonResult(&s); - sqlite3_result_subtype(pCtx, JSON_SUBTYPE); -} +/************************************************************************** +** Utility routines for the JSON text parser +**************************************************************************/ /* ** Translate a single byte of Hex into an integer. -** This routine only works if h really is a valid hexadecimal -** character: 0..9a..fA..F +** This routine only gives a correct answer if h really is a valid hexadecimal +** character: 0..9a..fA..F. But unlike sqlite3HexToInt(), it does not +** assert() if the digit is not hex. */ static u8 jsonHexToInt(int h){ - assert( (h>='0' && h<='9') || (h>='a' && h<='f') || (h>='A' && h<='F') ); +#ifdef SQLITE_ASCII + h += 9*(1&(h>>6)); +#endif #ifdef SQLITE_EBCDIC h += 9*(1&~(h>>4)); -#else - h += 9*(1&(h>>6)); #endif return (u8)(h & 0xf); } @@ -201363,10 +209824,6 @@ static u8 jsonHexToInt(int h){ */ static u32 jsonHexToInt4(const char *z){ u32 v; - assert( sqlite3Isxdigit(z[0]) ); - assert( sqlite3Isxdigit(z[1]) ); - assert( sqlite3Isxdigit(z[2]) ); - assert( sqlite3Isxdigit(z[3]) ); v = (jsonHexToInt(z[0])<<12) + (jsonHexToInt(z[1])<<8) + (jsonHexToInt(z[2])<<4) @@ -201375,420 +209832,1099 @@ static u32 jsonHexToInt4(const char *z){ } /* -** Make the JsonNode the return value of the function. +** Return true if z[] begins with 2 (or more) hexadecimal digits */ -static void jsonReturn( - JsonNode *pNode, /* Node to return */ - sqlite3_context *pCtx, /* Return value for this function */ - sqlite3_value **aReplace /* Array of replacement values */ -){ - switch( pNode->eType ){ - default: { - assert( pNode->eType==JSON_NULL ); - sqlite3_result_null(pCtx); - break; - } - case JSON_TRUE: { - sqlite3_result_int(pCtx, 1); - break; - } - case JSON_FALSE: { - sqlite3_result_int(pCtx, 0); - break; - } - case JSON_INT: { - sqlite3_int64 i = 0; - const char *z; - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - if( z[0]=='-' ){ z++; } - while( z[0]>='0' && z[0]<='9' ){ - unsigned v = *(z++) - '0'; - if( i>=LARGEST_INT64/10 ){ - if( i>LARGEST_INT64/10 ) goto int_as_real; - if( z[0]>='0' && z[0]<='9' ) goto int_as_real; - if( v==9 ) goto int_as_real; - if( v==8 ){ - if( pNode->u.zJContent[0]=='-' ){ - sqlite3_result_int64(pCtx, SMALLEST_INT64); - goto int_done; - }else{ - goto int_as_real; +static int jsonIs2Hex(const char *z){ + return sqlite3Isxdigit(z[0]) && sqlite3Isxdigit(z[1]); +} + +/* +** Return true if z[] begins with 4 (or more) hexadecimal digits +*/ +static int jsonIs4Hex(const char *z){ + return jsonIs2Hex(z) && jsonIs2Hex(&z[2]); +} + +/* +** Return the number of bytes of JSON5 whitespace at the beginning of +** the input string z[]. 
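+**
+** For example, if z[] begins with the five bytes "// hi" followed by
+** a newline, a space, and then "{", the return value is 7: the
+** comment, its terminating newline, and the space all count as
+** JSON5 whitespace.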
+** +** JSON5 whitespace consists of any of the following characters: +** +** Unicode UTF-8 Name +** U+0009 09 horizontal tab +** U+000a 0a line feed +** U+000b 0b vertical tab +** U+000c 0c form feed +** U+000d 0d carriage return +** U+0020 20 space +** U+00a0 c2 a0 non-breaking space +** U+1680 e1 9a 80 ogham space mark +** U+2000 e2 80 80 en quad +** U+2001 e2 80 81 em quad +** U+2002 e2 80 82 en space +** U+2003 e2 80 83 em space +** U+2004 e2 80 84 three-per-em space +** U+2005 e2 80 85 four-per-em space +** U+2006 e2 80 86 six-per-em space +** U+2007 e2 80 87 figure space +** U+2008 e2 80 88 punctuation space +** U+2009 e2 80 89 thin space +** U+200a e2 80 8a hair space +** U+2028 e2 80 a8 line separator +** U+2029 e2 80 a9 paragraph separator +** U+202f e2 80 af narrow no-break space (NNBSP) +** U+205f e2 81 9f medium mathematical space (MMSP) +** U+3000 e3 80 80 ideographical space +** U+FEFF ef bb bf byte order mark +** +** In addition, comments between '/', '*' and '*', '/' and +** from '/', '/' to end-of-line are also considered to be whitespace. +*/ +static int json5Whitespace(const char *zIn){ + int n = 0; + const u8 *z = (u8*)zIn; + while( 1 /*exit by "goto whitespace_done"*/ ){ + switch( z[n] ){ + case 0x09: + case 0x0a: + case 0x0b: + case 0x0c: + case 0x0d: + case 0x20: { + n++; + break; + } + case '/': { + if( z[n+1]=='*' && z[n+2]!=0 ){ + int j; + for(j=n+3; z[j]!='/' || z[j-1]!='*'; j++){ + if( z[j]==0 ) goto whitespace_done; + } + n = j+1; + break; + }else if( z[n+1]=='/' ){ + int j; + char c; + for(j=n+2; (c = z[j])!=0; j++){ + if( c=='\n' || c=='\r' ) break; + if( 0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2]) + ){ + j += 2; + break; } } + n = j; + if( z[n] ) n++; + break; } - i = i*10 + v; + goto whitespace_done; } - if( pNode->u.zJContent[0]=='-' ){ i = -i; } - sqlite3_result_int64(pCtx, i); - int_done: - break; - int_as_real: ; /* no break */ deliberate_fall_through - } - case JSON_REAL: { - double r; -#ifdef SQLITE_AMALGAMATION - const char *z; - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); -#else - assert( pNode->eU==1 ); - r = strtod(pNode->u.zJContent, 0); -#endif - sqlite3_result_double(pCtx, r); - break; - } - case JSON_STRING: { -#if 0 /* Never happens because JNODE_RAW is only set by json_set(), - ** json_insert() and json_replace() and those routines do not - ** call jsonReturn() */ - if( pNode->jnFlags & JNODE_RAW ){ - assert( pNode->eU==1 ); - sqlite3_result_text(pCtx, pNode->u.zJContent, pNode->n, - SQLITE_TRANSIENT); - }else -#endif - assert( (pNode->jnFlags & JNODE_RAW)==0 ); - if( (pNode->jnFlags & JNODE_ESCAPE)==0 ){ - /* JSON formatted without any backslash-escapes */ - assert( pNode->eU==1 ); - sqlite3_result_text(pCtx, pNode->u.zJContent+1, pNode->n-2, - SQLITE_TRANSIENT); - }else{ - /* Translate JSON formatted string into raw text */ - u32 i; - u32 n = pNode->n; - const char *z; - char *zOut; - u32 j; - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - zOut = sqlite3_malloc( n+1 ); - if( zOut==0 ){ - sqlite3_result_error_nomem(pCtx); + case 0xc2: { + if( z[n+1]==0xa0 ){ + n += 2; break; } - for(i=1, j=0; i>6)); - zOut[j++] = 0x80 | (v&0x3f); - }else{ - u32 vlo; - if( (v&0xfc00)==0xd800 - && i>18); - zOut[j++] = 0x80 | ((v>>12)&0x3f); - zOut[j++] = 0x80 | ((v>>6)&0x3f); - zOut[j++] = 0x80 | (v&0x3f); - }else{ - zOut[j++] = 0xe0 | (v>>12); - zOut[j++] = 0x80 | ((v>>6)&0x3f); - zOut[j++] = 0x80 | (v&0x3f); - } - } - }else{ - if( c=='b' ){ - c = '\b'; - }else 
if( c=='f' ){ - c = '\f'; - }else if( c=='n' ){ - c = '\n'; - }else if( c=='r' ){ - c = '\r'; - }else if( c=='t' ){ - c = '\t'; - } - zOut[j++] = c; - } + goto whitespace_done; + } + case 0xe1: { + if( z[n+1]==0x9a && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xe2: { + if( z[n+1]==0x80 ){ + u8 c = z[n+2]; + if( c<0x80 ) goto whitespace_done; + if( c<=0x8a || c==0xa8 || c==0xa9 || c==0xaf ){ + n += 3; + break; } + }else if( z[n+1]==0x81 && z[n+2]==0x9f ){ + n += 3; + break; } - zOut[j] = 0; - sqlite3_result_text(pCtx, zOut, j, sqlite3_free); + goto whitespace_done; + } + case 0xe3: { + if( z[n+1]==0x80 && z[n+2]==0x80 ){ + n += 3; + break; + } + goto whitespace_done; + } + case 0xef: { + if( z[n+1]==0xbb && z[n+2]==0xbf ){ + n += 3; + break; + } + goto whitespace_done; + } + default: { + goto whitespace_done; } - break; - } - case JSON_ARRAY: - case JSON_OBJECT: { - jsonReturnJson(pNode, pCtx, aReplace); - break; } } + whitespace_done: + return n; } -/* Forward reference */ -static int jsonParseAddNode(JsonParse*,u32,u32,const char*); - /* -** A macro to hint to the compiler that a function should not be -** inlined. +** Extra floating-point literals to allow in JSON. */ -#if defined(__GNUC__) -# define JSON_NOINLINE __attribute__((noinline)) -#elif defined(_MSC_VER) && _MSC_VER>=1310 -# define JSON_NOINLINE __declspec(noinline) -#else -# define JSON_NOINLINE -#endif +static const struct NanInfName { + char c1; + char c2; + char n; + char eType; + char nRepl; + char *zMatch; + char *zRepl; +} aNanInfName[] = { + { 'i', 'I', 3, JSONB_FLOAT, 7, "inf", "9.0e999" }, + { 'i', 'I', 8, JSONB_FLOAT, 7, "infinity", "9.0e999" }, + { 'n', 'N', 3, JSONB_NULL, 4, "NaN", "null" }, + { 'q', 'Q', 4, JSONB_NULL, 4, "QNaN", "null" }, + { 's', 'S', 4, JSONB_NULL, 4, "SNaN", "null" }, +}; -static JSON_NOINLINE int jsonParseAddNodeExpand( - JsonParse *pParse, /* Append the node to this object */ - u32 eType, /* Node type */ - u32 n, /* Content size or sub-node count */ - const char *zContent /* Content */ +/* +** Report the wrong number of arguments for json_insert(), json_replace() +** or json_set(). +*/ +static void jsonWrongNumArgs( + sqlite3_context *pCtx, + const char *zFuncName ){ - u32 nNew; - JsonNode *pNew; - assert( pParse->nNode>=pParse->nAlloc ); - if( pParse->oom ) return -1; - nNew = pParse->nAlloc*2 + 10; - pNew = sqlite3_realloc64(pParse->aNode, sizeof(JsonNode)*nNew); - if( pNew==0 ){ - pParse->oom = 1; - return -1; + char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments", + zFuncName); + sqlite3_result_error(pCtx, zMsg, -1); + sqlite3_free(zMsg); +} + +/**************************************************************************** +** Utility routines for dealing with the binary BLOB representation of JSON +****************************************************************************/ + +/* +** Expand pParse->aBlob so that it holds at least N bytes. +** +** Return the number of errors. 
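+**
+** The allocation at least doubles on each call (starting from a
+** 100-byte floor), so a long run of small appends costs amortized
+** constant time per byte.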
+*/ +static int jsonBlobExpand(JsonParse *pParse, u32 N){ + u8 *aNew; + u32 t; + assert( N>pParse->nBlobAlloc ); + if( pParse->nBlobAlloc==0 ){ + t = 100; + }else{ + t = pParse->nBlobAlloc*2; } - pParse->nAlloc = nNew; - pParse->aNode = pNew; - assert( pParse->nNodenAlloc ); - return jsonParseAddNode(pParse, eType, n, zContent); + if( tdb, pParse->aBlob, t); + if( aNew==0 ){ pParse->oom = 1; return 1; } + pParse->aBlob = aNew; + pParse->nBlobAlloc = t; + return 0; } /* -** Create a new JsonNode instance based on the arguments and append that -** instance to the JsonParse. Return the index in pParse->aNode[] of the -** new node, or -1 if a memory allocation fails. +** If pParse->aBlob is not previously editable (because it is taken +** from sqlite3_value_blob(), as indicated by the fact that +** pParse->nBlobAlloc==0 and pParse->nBlob>0) then make it editable +** by making a copy into space obtained from malloc. +** +** Return true on success. Return false on OOM. */ -static int jsonParseAddNode( - JsonParse *pParse, /* Append the node to this object */ - u32 eType, /* Node type */ - u32 n, /* Content size or sub-node count */ - const char *zContent /* Content */ +static int jsonBlobMakeEditable(JsonParse *pParse, u32 nExtra){ + u8 *aOld; + u32 nSize; + assert( !pParse->bReadOnly ); + if( pParse->oom ) return 0; + if( pParse->nBlobAlloc>0 ) return 1; + aOld = pParse->aBlob; + nSize = pParse->nBlob + nExtra; + pParse->aBlob = 0; + if( jsonBlobExpand(pParse, nSize) ){ + return 0; + } + assert( pParse->nBlobAlloc >= pParse->nBlob + nExtra ); + memcpy(pParse->aBlob, aOld, pParse->nBlob); + return 1; +} + +/* Expand pParse->aBlob and append one bytes. +*/ +static SQLITE_NOINLINE void jsonBlobExpandAndAppendOneByte( + JsonParse *pParse, + u8 c ){ - JsonNode *p; - if( pParse->aNode==0 || pParse->nNode>=pParse->nAlloc ){ - return jsonParseAddNodeExpand(pParse, eType, n, zContent); + jsonBlobExpand(pParse, pParse->nBlob+1); + if( pParse->oom==0 ){ + assert( pParse->nBlob+1<=pParse->nBlobAlloc ); + pParse->aBlob[pParse->nBlob++] = c; } - p = &pParse->aNode[pParse->nNode]; - p->eType = (u8)eType; - p->jnFlags = 0; - VVA( p->eU = zContent ? 1 : 0 ); - p->n = n; - p->u.zJContent = zContent; - return pParse->nNode++; } -/* -** Return true if z[] begins with 4 (or more) hexadecimal digits +/* Append a single character. */ -static int jsonIs4Hex(const char *z){ - int i; - for(i=0; i<4; i++) if( !sqlite3Isxdigit(z[i]) ) return 0; +static void jsonBlobAppendOneByte(JsonParse *pParse, u8 c){ + if( pParse->nBlob >= pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendOneByte(pParse, c); + }else{ + pParse->aBlob[pParse->nBlob++] = c; + } +} + +/* Slow version of jsonBlobAppendNode() that first resizes the +** pParse->aBlob structure. +*/ +static void jsonBlobAppendNode(JsonParse*,u8,u32,const void*); +static SQLITE_NOINLINE void jsonBlobExpandAndAppendNode( + JsonParse *pParse, + u8 eType, + u32 szPayload, + const void *aPayload +){ + if( jsonBlobExpand(pParse, pParse->nBlob+szPayload+9) ) return; + jsonBlobAppendNode(pParse, eType, szPayload, aPayload); +} + + +/* Append an node type byte together with the payload size and +** possibly also the payload. +** +** If aPayload is not NULL, then it is a pointer to the payload which +** is also appended. If aPayload is NULL, the pParse->aBlob[] array +** is resized (if necessary) so that it is big enough to hold the +** payload, but the payload is not appended and pParse->nBlob is left +** pointing to where the first byte of payload will eventually be. 
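+**
+** For example, a 5-byte payload is described by the single header
+** byte (eType | 0x50); a 200-byte payload uses (eType | 0xc0)
+** followed by the size byte 0xc8; and a 70000-byte payload uses
+** (eType | 0xe0) followed by the size as four big-endian bytes.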
+*/ +static void jsonBlobAppendNode( + JsonParse *pParse, /* The JsonParse object under construction */ + u8 eType, /* Node type. One of JSONB_* */ + u32 szPayload, /* Number of bytes of payload */ + const void *aPayload /* The payload. Might be NULL */ +){ + u8 *a; + if( pParse->nBlob+szPayload+9 > pParse->nBlobAlloc ){ + jsonBlobExpandAndAppendNode(pParse,eType,szPayload,aPayload); + return; + } + assert( pParse->aBlob!=0 ); + a = &pParse->aBlob[pParse->nBlob]; + if( szPayload<=11 ){ + a[0] = eType | (szPayload<<4); + pParse->nBlob += 1; + }else if( szPayload<=0xff ){ + a[0] = eType | 0xc0; + a[1] = szPayload & 0xff; + pParse->nBlob += 2; + }else if( szPayload<=0xffff ){ + a[0] = eType | 0xd0; + a[1] = (szPayload >> 8) & 0xff; + a[2] = szPayload & 0xff; + pParse->nBlob += 3; + }else{ + a[0] = eType | 0xe0; + a[1] = (szPayload >> 24) & 0xff; + a[2] = (szPayload >> 16) & 0xff; + a[3] = (szPayload >> 8) & 0xff; + a[4] = szPayload & 0xff; + pParse->nBlob += 5; + } + if( aPayload ){ + pParse->nBlob += szPayload; + memcpy(&pParse->aBlob[pParse->nBlob-szPayload], aPayload, szPayload); + } +} + +/* Change the payload size for the node at index i to be szPayload. +*/ +static int jsonBlobChangePayloadSize( + JsonParse *pParse, + u32 i, + u32 szPayload +){ + u8 *a; + u8 szType; + u8 nExtra; + u8 nNeeded; + int delta; + if( pParse->oom ) return 0; + a = &pParse->aBlob[i]; + szType = a[0]>>4; + if( szType<=11 ){ + nExtra = 0; + }else if( szType==12 ){ + nExtra = 1; + }else if( szType==13 ){ + nExtra = 2; + }else{ + nExtra = 4; + } + if( szPayload<=11 ){ + nNeeded = 0; + }else if( szPayload<=0xff ){ + nNeeded = 1; + }else if( szPayload<=0xffff ){ + nNeeded = 2; + }else{ + nNeeded = 4; + } + delta = nNeeded - nExtra; + if( delta ){ + u32 newSize = pParse->nBlob + delta; + if( delta>0 ){ + if( newSize>pParse->nBlobAlloc && jsonBlobExpand(pParse, newSize) ){ + return 0; /* OOM error. Error state recorded in pParse->oom. */ + } + a = &pParse->aBlob[i]; + memmove(&a[1+delta], &a[1], pParse->nBlob - (i+1)); + }else{ + memmove(&a[1], &a[1-delta], pParse->nBlob - (i+1-delta)); + } + pParse->nBlob = newSize; + } + if( nNeeded==0 ){ + a[0] = (a[0] & 0x0f) | (szPayload<<4); + }else if( nNeeded==1 ){ + a[0] = (a[0] & 0x0f) | 0xc0; + a[1] = szPayload & 0xff; + }else if( nNeeded==2 ){ + a[0] = (a[0] & 0x0f) | 0xd0; + a[1] = (szPayload >> 8) & 0xff; + a[2] = szPayload & 0xff; + }else{ + a[0] = (a[0] & 0x0f) | 0xe0; + a[1] = (szPayload >> 24) & 0xff; + a[2] = (szPayload >> 16) & 0xff; + a[3] = (szPayload >> 8) & 0xff; + a[4] = szPayload & 0xff; + } + return delta; +} + +/* +** If z[0] is 'u' and is followed by exactly 4 hexadecimal character, +** then set *pOp to JSONB_TEXTJ and return true. If not, do not make +** any changes to *pOp and return false. +*/ +static int jsonIs4HexB(const char *z, int *pOp){ + if( z[0]!='u' ) return 0; + if( !jsonIs4Hex(&z[1]) ) return 0; + *pOp = JSONB_TEXTJ; return 1; } /* -** Parse a single JSON value which begins at pParse->zJson[i]. Return the -** index of the first character past the end of the value parsed. +** Check a single element of the JSONB in pParse for validity. +** +** The element to be checked starts at offset i and must end at on the +** last byte before iEnd. +** +** Return 0 if everything is correct. Return the 1-based byte offset of the +** error if a problem is detected. (In other words, if the error is at offset +** 0, return 1). +*/ +static u32 jsonbValidityCheck( + const JsonParse *pParse, /* Input JSONB. 
Only aBlob and nBlob are used */ + u32 i, /* Start of element as pParse->aBlob[i] */ + u32 iEnd, /* One more than the last byte of the element */ + u32 iDepth /* Current nesting depth */ +){ + u32 n, sz, j, k; + const u8 *z; + u8 x; + if( iDepth>JSON_MAX_DEPTH ) return i+1; + sz = 0; + n = jsonbPayloadSize(pParse, i, &sz); + if( NEVER(n==0) ) return i+1; /* Checked by caller */ + if( NEVER(i+n+sz!=iEnd) ) return i+1; /* Checked by caller */ + z = pParse->aBlob; + x = z[i] & 0x0f; + switch( x ){ + case JSONB_NULL: + case JSONB_TRUE: + case JSONB_FALSE: { + return n+sz==1 ? 0 : i+1; + } + case JSONB_INT: { + if( sz<1 ) return i+1; + j = i+n; + if( z[j]=='-' ){ + j++; + if( sz<2 ) return i+1; + } + k = i+n+sz; + while( jk ) return j+1; + if( z[j+1]!='.' && z[j+1]!='e' && z[j+1]!='E' ) return j+1; + j++; + } + for(; j0 ) return j+1; + if( x==JSONB_FLOAT && (j==k-1 || !sqlite3Isdigit(z[j+1])) ){ + return j+1; + } + seen = 1; + continue; + } + if( z[j]=='e' || z[j]=='E' ){ + if( seen==2 ) return j+1; + if( j==k-1 ) return j+1; + if( z[j+1]=='+' || z[j+1]=='-' ){ + j++; + if( j==k-1 ) return j+1; + } + seen = 2; + continue; + } + return j+1; + } + if( seen==0 ) return i+1; + return 0; + } + case JSONB_TEXT: { + j = i+n; + k = j+sz; + while( j=k ){ + return j+1; + }else if( strchr("\"\\/bfnrt",z[j+1])!=0 ){ + j++; + }else if( z[j+1]=='u' ){ + if( j+5>=k ) return j+1; + if( !jsonIs4Hex((const char*)&z[j+2]) ) return j+1; + j++; + }else if( x!=JSONB_TEXT5 ){ + return j+1; + }else{ + u32 c = 0; + u32 szC = jsonUnescapeOneChar((const char*)&z[j], k-j, &c); + if( c==JSON_INVALID_CHAR ) return j+1; + j += szC - 1; + } + } + j++; + } + return 0; + } + case JSONB_TEXTRAW: { + return 0; + } + case JSONB_ARRAY: { + u32 sub; + j = i+n; + k = j+sz; + while( jk ) return j+1; + sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1); + if( sub ) return sub; + j += n + sz; + } + assert( j==k ); + return 0; + } + case JSONB_OBJECT: { + u32 cnt = 0; + u32 sub; + j = i+n; + k = j+sz; + while( jk ) return j+1; + if( (cnt & 1)==0 ){ + x = z[j] & 0x0f; + if( xJSONB_TEXTRAW ) return j+1; + } + sub = jsonbValidityCheck(pParse, j, j+n+sz, iDepth+1); + if( sub ) return sub; + cnt++; + j += n + sz; + } + assert( j==k ); + if( (cnt & 1)!=0 ) return j+1; + return 0; + } + default: { + return i+1; + } + } +} + +/* +** Translate a single element of JSON text at pParse->zJson[i] into +** its equivalent binary JSONB representation. Append the translation into +** pParse->aBlob[] beginning at pParse->nBlob. The size of +** pParse->aBlob[] is increased as necessary. +** +** Return the index of the first character past the end of the element parsed, +** or one of the following special result codes: ** -** Return negative for a syntax error. Special cases: return -2 if the -** first non-whitespace character is '}' and return -3 if the first -** non-whitespace character is ']'. 
+** 0 End of input +** -1 Syntax error or OOM +** -2 '}' seen \ +** -3 ']' seen \___ For these returns, pParse->iErr is set to +** -4 ',' seen / the index in zJson[] of the seen character +** -5 ':' seen / */ -static int jsonParseValue(JsonParse *pParse, u32 i){ +static int jsonTranslateTextToBlob(JsonParse *pParse, u32 i){ char c; u32 j; - int iThis; + u32 iThis, iStart; int x; - JsonNode *pNode; + u8 t; const char *z = pParse->zJson; - while( fast_isspace(z[i]) ){ i++; } - if( (c = z[i])=='{' ){ +json_parse_restart: + switch( (u8)z[i] ){ + case '{': { /* Parse object */ - iThis = jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - if( iThis<0 ) return -1; + iThis = pParse->nBlob; + jsonBlobAppendNode(pParse, JSONB_OBJECT, pParse->nJson-i, 0); + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } + iStart = pParse->nBlob; for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; - x = jsonParseValue(pParse, j); - if( x<0 ){ - pParse->iDepth--; - if( x==(-2) && pParse->nNode==(u32)iThis+1 ) return j+1; - return -1; + u32 iBlob = pParse->nBlob; + x = jsonTranslateTextToBlob(pParse, j); + if( x<=0 ){ + int op; + if( x==(-2) ){ + j = pParse->iErr; + if( pParse->nBlob!=(u32)iStart ) pParse->hasNonstd = 1; + break; + } + j += json5Whitespace(&z[j]); + op = JSONB_TEXT; + if( sqlite3JsonId1(z[j]) + || (z[j]=='\\' && jsonIs4HexB(&z[j+1], &op)) + ){ + int k = j+1; + while( (sqlite3JsonId2(z[k]) && json5Whitespace(&z[k])==0) + || (z[k]=='\\' && jsonIs4HexB(&z[k+1], &op)) + ){ + k++; + } + assert( iBlob==pParse->nBlob ); + jsonBlobAppendNode(pParse, op, k-j, &z[j]); + pParse->hasNonstd = 1; + x = k; + }else{ + if( x!=-1 ) pParse->iErr = j; + return -1; + } } if( pParse->oom ) return -1; - pNode = &pParse->aNode[pParse->nNode-1]; - if( pNode->eType!=JSON_STRING ) return -1; - pNode->jnFlags |= JNODE_LABEL; + t = pParse->aBlob[iBlob] & 0x0f; + if( tJSONB_TEXTRAW ){ + pParse->iErr = j; + return -1; + } j = x; - while( fast_isspace(z[j]) ){ j++; } - if( z[j]!=':' ) return -1; - j++; - x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ) return -1; + if( z[j]==':' ){ + j++; + }else{ + if( jsonIsspace(z[j]) ){ + /* strspn() is not helpful here */ + do{ j++; }while( jsonIsspace(z[j]) ); + if( z[j]==':' ){ + j++; + goto parse_object_value; + } + } + x = jsonTranslateTextToBlob(pParse, j); + if( x!=(-5) ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } + j = pParse->iErr+1; + } + parse_object_value: + x = jsonTranslateTextToBlob(pParse, j); + if( x<=0 ){ + if( x!=(-1) ) pParse->iErr = j; + return -1; + } j = x; - while( fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!='}' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + }else{ + if( jsonIsspace(z[j]) ){ + j += 1 + (u32)strspn(&z[j+1], jsonSpaces); + if( z[j]==',' ){ + continue; + }else if( z[j]=='}' ){ + break; + } + } + x = jsonTranslateTextToBlob(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-2) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } - pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart); + pParse->iDepth--; return j+1; - }else if( c=='[' ){ + } + case '[': { /* Parse array */ - iThis = jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - if( iThis<0 ) return -1; - memset(&pParse->aNode[iThis].u, 0, sizeof(pParse->aNode[iThis].u)); + iThis = pParse->nBlob; + assert( 
i<=(u32)pParse->nJson ); + jsonBlobAppendNode(pParse, JSONB_ARRAY, pParse->nJson - i, 0); + iStart = pParse->nBlob; + if( pParse->oom ) return -1; + if( ++pParse->iDepth > JSON_MAX_DEPTH ){ + pParse->iErr = i; + return -1; + } for(j=i+1;;j++){ - while( fast_isspace(z[j]) ){ j++; } - if( ++pParse->iDepth > JSON_MAX_DEPTH ) return -1; - x = jsonParseValue(pParse, j); - pParse->iDepth--; - if( x<0 ){ - if( x==(-3) && pParse->nNode==(u32)iThis+1 ) return j+1; + x = jsonTranslateTextToBlob(pParse, j); + if( x<=0 ){ + if( x==(-3) ){ + j = pParse->iErr; + if( pParse->nBlob!=iStart ) pParse->hasNonstd = 1; + break; + } + if( x!=(-1) ) pParse->iErr = j; return -1; } j = x; - while( fast_isspace(z[j]) ){ j++; } - c = z[j]; - if( c==',' ) continue; - if( c!=']' ) return -1; - break; + if( z[j]==',' ){ + continue; + }else if( z[j]==']' ){ + break; + }else{ + if( jsonIsspace(z[j]) ){ + j += 1 + (u32)strspn(&z[j+1], jsonSpaces); + if( z[j]==',' ){ + continue; + }else if( z[j]==']' ){ + break; + } + } + x = jsonTranslateTextToBlob(pParse, j); + if( x==(-4) ){ + j = pParse->iErr; + continue; + } + if( x==(-3) ){ + j = pParse->iErr; + break; + } + } + pParse->iErr = j; + return -1; } - pParse->aNode[iThis].n = pParse->nNode - (u32)iThis - 1; + jsonBlobChangePayloadSize(pParse, iThis, pParse->nBlob - iStart); + pParse->iDepth--; return j+1; - }else if( c=='"' ){ + } + case '\'': { + u8 opcode; + char cDelim; + pParse->hasNonstd = 1; + opcode = JSONB_TEXT; + goto parse_string; + case '"': /* Parse string */ - u8 jnFlags = 0; + opcode = JSONB_TEXT; + parse_string: + cDelim = z[i]; j = i+1; - for(;;){ - c = z[j]; - if( (c & ~0x1f)==0 ){ - /* Control characters are not allowed in strings */ - return -1; + while( 1 /*exit-by-break*/ ){ + if( jsonIsOk[(u8)z[j]] ){ + if( !jsonIsOk[(u8)z[j+1]] ){ + j += 1; + }else if( !jsonIsOk[(u8)z[j+2]] ){ + j += 2; + }else{ + j += 3; + continue; + } } - if( c=='\\' ){ + c = z[j]; + if( c==cDelim ){ + break; + }else if( c=='\\' ){ c = z[++j]; if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f' || c=='n' || c=='r' || c=='t' - || (c=='u' && jsonIs4Hex(z+j+1)) ){ - jnFlags = JNODE_ESCAPE; + || (c=='u' && jsonIs4Hex(&z[j+1])) ){ + if( opcode==JSONB_TEXT ) opcode = JSONB_TEXTJ; + }else if( c=='\'' || c=='0' || c=='v' || c=='\n' + || (0xe2==(u8)c && 0x80==(u8)z[j+1] + && (0xa8==(u8)z[j+2] || 0xa9==(u8)z[j+2])) + || (c=='x' && jsonIs2Hex(&z[j+1])) ){ + opcode = JSONB_TEXT5; + pParse->hasNonstd = 1; + }else if( c=='\r' ){ + if( z[j+1]=='\n' ) j++; + opcode = JSONB_TEXT5; + pParse->hasNonstd = 1; }else{ + pParse->iErr = j; return -1; } + }else if( c<=0x1f ){ + /* Control characters are not allowed in strings */ + pParse->iErr = j; + return -1; }else if( c=='"' ){ - break; + opcode = JSONB_TEXT5; } j++; } - jsonParseAddNode(pParse, JSON_STRING, j+1-i, &z[i]); - if( !pParse->oom ) pParse->aNode[pParse->nNode-1].jnFlags = jnFlags; + jsonBlobAppendNode(pParse, opcode, j-1-i, &z[i+1]); return j+1; - }else if( c=='n' - && strncmp(z+i,"null",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return i+4; - }else if( c=='t' - && strncmp(z+i,"true",4)==0 - && !sqlite3Isalnum(z[i+4]) ){ - jsonParseAddNode(pParse, JSON_TRUE, 0, 0); - return i+4; - }else if( c=='f' - && strncmp(z+i,"false",5)==0 - && !sqlite3Isalnum(z[i+5]) ){ - jsonParseAddNode(pParse, JSON_FALSE, 0, 0); - return i+5; - }else if( c=='-' || (c>='0' && c<='9') ){ + } + case 't': { + if( strncmp(z+i,"true",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonBlobAppendOneByte(pParse, JSONB_TRUE); + return i+4; + 
} + pParse->iErr = i; + return -1; + } + case 'f': { + if( strncmp(z+i,"false",5)==0 && !sqlite3Isalnum(z[i+5]) ){ + jsonBlobAppendOneByte(pParse, JSONB_FALSE); + return i+5; + } + pParse->iErr = i; + return -1; + } + case '+': { + u8 seenE; + pParse->hasNonstd = 1; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ + goto parse_number; + case '.': + if( sqlite3Isdigit(z[i+1]) ){ + pParse->hasNonstd = 1; + t = 0x03; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ + seenE = 0; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + case '-': + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': /* Parse number */ - u8 seenDP = 0; - u8 seenE = 0; + t = 0x00; /* Bit 0x01: JSON5. Bit 0x02: FLOAT */ + parse_number: + seenE = 0; assert( '-' < '0' ); + assert( '+' < '0' ); + assert( '.' < '0' ); + c = z[i]; + if( c<='0' ){ - j = c=='-' ? i+1 : i; - if( z[j]=='0' && z[j+1]>='0' && z[j+1]<='9' ) return -1; + if( c=='0' ){ + if( (z[i+1]=='x' || z[i+1]=='X') && sqlite3Isxdigit(z[i+2]) ){ + assert( t==0x00 ); + pParse->hasNonstd = 1; + t = 0x01; + for(j=i+3; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + }else if( sqlite3Isdigit(z[i+1]) ){ + pParse->iErr = i+1; + return -1; + } + }else{ + if( !sqlite3Isdigit(z[i+1]) ){ + /* JSON5 allows for "+Infinity" and "-Infinity" using exactly + ** that case. SQLite also allows these in any case and it allows + ** "+inf" and "-inf". */ + if( (z[i+1]=='I' || z[i+1]=='i') + && sqlite3StrNICmp(&z[i+1], "inf",3)==0 + ){ + pParse->hasNonstd = 1; + if( z[i]=='-' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); + }else{ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + } + return i + (sqlite3StrNICmp(&z[i+4],"inity",5)==0 ? 9 : 4); + } + if( z[i+1]=='.' ){ + pParse->hasNonstd = 1; + t |= 0x01; + goto parse_number_2; + } + pParse->iErr = i; + return -1; + } + if( z[i+1]=='0' ){ + if( sqlite3Isdigit(z[i+2]) ){ + pParse->iErr = i+1; + return -1; + }else if( (z[i+2]=='x' || z[i+2]=='X') && sqlite3Isxdigit(z[i+3]) ){ + pParse->hasNonstd = 1; + t |= 0x01; + for(j=i+4; sqlite3Isxdigit(z[j]); j++){} + goto parse_number_finish; + } + } + } } - j = i+1; - for(;; j++){ + + parse_number_2: + for(j=i+1;; j++){ c = z[j]; - if( c>='0' && c<='9' ) continue; + if( sqlite3Isdigit(c) ) continue; if( c=='.' ){ - if( z[j-1]=='-' ) return -1; - if( seenDP ) return -1; - seenDP = 1; + if( (t & 0x02)!=0 ){ + pParse->iErr = j; + return -1; + } + t |= 0x02; continue; } if( c=='e' || c=='E' ){ - if( z[j-1]<'0' ) return -1; - if( seenE ) return -1; - seenDP = seenE = 1; + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + t |= 0x01; + }else{ + pParse->iErr = j; + return -1; + } + } + if( seenE ){ + pParse->iErr = j; + return -1; + } + t |= 0x02; + seenE = 1; c = z[j+1]; if( c=='+' || c=='-' ){ j++; c = z[j+1]; } - if( c<'0' || c>'9' ) return -1; + if( c<'0' || c>'9' ){ + pParse->iErr = j; + return -1; + } continue; } break; } - if( z[j-1]<'0' ) return -1; - jsonParseAddNode(pParse, seenDP ? 
JSON_REAL : JSON_INT, - j - i, &z[i]); + if( z[j-1]<'0' ){ + if( ALWAYS(z[j-1]=='.') && ALWAYS(j-2>=i) && sqlite3Isdigit(z[j-2]) ){ + pParse->hasNonstd = 1; + t |= 0x01; + }else{ + pParse->iErr = j; + return -1; + } + } + parse_number_finish: + assert( JSONB_INT+0x01==JSONB_INT5 ); + assert( JSONB_FLOAT+0x01==JSONB_FLOAT5 ); + assert( JSONB_INT+0x02==JSONB_FLOAT ); + if( z[i]=='+' ) i++; + jsonBlobAppendNode(pParse, JSONB_INT+t, j-i, &z[i]); return j; - }else if( c=='}' ){ + } + case '}': { + pParse->iErr = i; return -2; /* End of {...} */ - }else if( c==']' ){ + } + case ']': { + pParse->iErr = i; return -3; /* End of [...] */ - }else if( c==0 ){ + } + case ',': { + pParse->iErr = i; + return -4; /* List separator */ + } + case ':': { + pParse->iErr = i; + return -5; /* Object label/value separator */ + } + case 0: { return 0; /* End of file */ - }else{ + } + case 0x09: + case 0x0a: + case 0x0d: + case 0x20: { + i += 1 + (u32)strspn(&z[i+1], jsonSpaces); + goto json_parse_restart; + } + case 0x0b: + case 0x0c: + case '/': + case 0xc2: + case 0xe1: + case 0xe2: + case 0xe3: + case 0xef: { + j = json5Whitespace(&z[i]); + if( j>0 ){ + i += j; + pParse->hasNonstd = 1; + goto json_parse_restart; + } + pParse->iErr = i; + return -1; + } + case 'n': { + if( strncmp(z+i,"null",4)==0 && !sqlite3Isalnum(z[i+4]) ){ + jsonBlobAppendOneByte(pParse, JSONB_NULL); + return i+4; + } + /* fall-through into the default case that checks for NaN */ + } + default: { + u32 k; + int nn; + c = z[i]; + for(k=0; khasNonstd = 1; + return i + nn; + } + pParse->iErr = i; return -1; /* Syntax error */ } + } /* End switch(z[i]) */ } + /* ** Parse a complete JSON string. Return 0 on success or non-zero if there -** are any errors. If an error occurs, free all memory associated with -** pParse. +** are any errors. If an error occurs, free all memory held by pParse, +** but not pParse itself. ** -** pParse is uninitialized when this routine is called. +** pParse must be initialized to an empty parse object prior to calling +** this routine. */ -static int jsonParse( +static int jsonConvertTextToBlob( JsonParse *pParse, /* Initialize and fill this JsonParse object */ - sqlite3_context *pCtx, /* Report errors here */ - const char *zJson /* Input JSON text to be parsed */ + sqlite3_context *pCtx /* Report errors here */ ){ int i; - memset(pParse, 0, sizeof(*pParse)); - if( zJson==0 ) return 1; - pParse->zJson = zJson; - i = jsonParseValue(pParse, 0); + const char *zJson = pParse->zJson; + i = jsonTranslateTextToBlob(pParse, 0); if( pParse->oom ) i = -1; if( i>0 ){ +#ifdef SQLITE_DEBUG assert( pParse->iDepth==0 ); - while( fast_isspace(zJson[i]) ) i++; - if( zJson[i] ) i = -1; + if( sqlite3Config.bJsonSelfcheck ){ + assert( jsonbValidityCheck(pParse, 0, pParse->nBlob, 0)==0 ); + } +#endif + while( jsonIsspace(zJson[i]) ) i++; + if( zJson[i] ){ + i += json5Whitespace(&zJson[i]); + if( zJson[i] ){ + if( pCtx ) sqlite3_result_error(pCtx, "malformed JSON", -1); + jsonParseReset(pParse); + return 1; + } + pParse->hasNonstd = 1; + } } if( i<=0 ){ if( pCtx!=0 ){ @@ -201804,161 +210940,719 @@ static int jsonParse( return 0; } -/* Mark node i of pParse as being a child of iParent. Call recursively -** to fill in all the descendants of node i. +/* +** The input string pStr is a well-formed JSON text string. Convert +** this into the JSONB format and make it the return value of the +** SQL function. 
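+**
+** The conversion reuses the text parser:  jsonTranslateTextToBlob()
+** is run over the completed string and the resulting blob is handed
+** to sqlite3_result_blob() with SQLITE_DYNAMIC, so that SQLite takes
+** ownership of the allocation.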
*/ -static void jsonParseFillInParentage(JsonParse *pParse, u32 i, u32 iParent){ - JsonNode *pNode = &pParse->aNode[i]; - u32 j; - pParse->aUp[i] = iParent; - switch( pNode->eType ){ - case JSON_ARRAY: { - for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j)){ - jsonParseFillInParentage(pParse, i+j, i); +static void jsonReturnStringAsBlob(JsonString *pStr){ + JsonParse px; + memset(&px, 0, sizeof(px)); + jsonStringTerminate(pStr); + if( pStr->eErr ){ + sqlite3_result_error_nomem(pStr->pCtx); + return; + } + px.zJson = pStr->zBuf; + px.nJson = pStr->nUsed; + px.db = sqlite3_context_db_handle(pStr->pCtx); + (void)jsonTranslateTextToBlob(&px, 0); + if( px.oom ){ + sqlite3DbFree(px.db, px.aBlob); + sqlite3_result_error_nomem(pStr->pCtx); + }else{ + assert( px.nBlobAlloc>0 ); + assert( !px.bReadOnly ); + sqlite3_result_blob(pStr->pCtx, px.aBlob, px.nBlob, SQLITE_DYNAMIC); + } +} + +/* The byte at index i is a node type-code. This routine +** determines the payload size for that node and writes that +** payload size in to *pSz. It returns the offset from i to the +** beginning of the payload. Return 0 on error. +*/ +static u32 jsonbPayloadSize(const JsonParse *pParse, u32 i, u32 *pSz){ + u8 x; + u32 sz; + u32 n; + if( NEVER(i>pParse->nBlob) ){ + *pSz = 0; + return 0; + } + x = pParse->aBlob[i]>>4; + if( x<=11 ){ + sz = x; + n = 1; + }else if( x==12 ){ + if( i+1>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = pParse->aBlob[i+1]; + n = 2; + }else if( x==13 ){ + if( i+2>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+1]<<8) + pParse->aBlob[i+2]; + n = 3; + }else if( x==14 ){ + if( i+4>=pParse->nBlob ){ + *pSz = 0; + return 0; + } + sz = ((u32)pParse->aBlob[i+1]<<24) + (pParse->aBlob[i+2]<<16) + + (pParse->aBlob[i+3]<<8) + pParse->aBlob[i+4]; + n = 5; + }else{ + if( i+8>=pParse->nBlob + || pParse->aBlob[i+1]!=0 + || pParse->aBlob[i+2]!=0 + || pParse->aBlob[i+3]!=0 + || pParse->aBlob[i+4]!=0 + ){ + *pSz = 0; + return 0; + } + sz = (pParse->aBlob[i+5]<<24) + (pParse->aBlob[i+6]<<16) + + (pParse->aBlob[i+7]<<8) + pParse->aBlob[i+8]; + n = 9; + } + if( (i64)i+sz+n > pParse->nBlob + && (i64)i+sz+n > pParse->nBlob-pParse->delta + ){ + sz = 0; + n = 0; + } + *pSz = sz; + return n; +} + + +/* +** Translate the binary JSONB representation of JSON beginning at +** pParse->aBlob[i] into a JSON text string. Append the JSON +** text onto the end of pOut. Return the index in pParse->aBlob[] +** of the first byte past the end of the element that is translated. +** +** If an error is detected in the BLOB input, the pOut->eErr flag +** might get set to JSTRING_MALFORMED. But not all BLOB input errors +** are detected. So a malformed JSONB input might either result +** in an error, or in incorrect JSON. +** +** The pOut->eErr JSTRING_OOM flag is set on a OOM. 
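+**
+** For example, a JSONB_TEXT node whose header byte carries a payload
+** size of 1 and whose payload is the single byte 'a' renders as "a",
+** three bytes of output counting the two enclosing double-quotes.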
+*/ +static u32 jsonTranslateBlobToText( + const JsonParse *pParse, /* the complete parse of the JSON */ + u32 i, /* Start rendering at this index */ + JsonString *pOut /* Write JSON here */ +){ + u32 sz, n, j, iEnd; + + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + pOut->eErr |= JSTRING_MALFORMED; + return pParse->nBlob+1; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_NULL: { + jsonAppendRawNZ(pOut, "null", 4); + return i+1; + } + case JSONB_TRUE: { + jsonAppendRawNZ(pOut, "true", 4); + return i+1; + } + case JSONB_FALSE: { + jsonAppendRawNZ(pOut, "false", 5); + return i+1; + } + case JSONB_INT: + case JSONB_FLOAT: { + if( sz==0 ) goto malformed_jsonb; + jsonAppendRaw(pOut, (const char*)&pParse->aBlob[i+n], sz); + break; + } + case JSONB_INT5: { /* Integer literal in hexadecimal notation */ + u32 k = 2; + sqlite3_uint64 u = 0; + const char *zIn = (const char*)&pParse->aBlob[i+n]; + int bOverflow = 0; + if( sz==0 ) goto malformed_jsonb; + if( zIn[0]=='-' ){ + jsonAppendChar(pOut, '-'); + k++; + }else if( zIn[0]=='+' ){ + k++; + } + for(; keErr |= JSTRING_MALFORMED; + break; + }else if( (u>>60)!=0 ){ + bOverflow = 1; + }else{ + u = u*16 + sqlite3HexToInt(zIn[k]); + } } + jsonPrintf(100,pOut,bOverflow?"9.0e999":"%llu", u); break; } - case JSON_OBJECT: { - for(j=1; j<=pNode->n; j += jsonNodeSize(pNode+j+1)+1){ - pParse->aUp[i+j] = i; - jsonParseFillInParentage(pParse, i+j+1, i); + case JSONB_FLOAT5: { /* Float literal missing digits beside "." */ + u32 k = 0; + const char *zIn = (const char*)&pParse->aBlob[i+n]; + if( sz==0 ) goto malformed_jsonb; + if( zIn[0]=='-' ){ + jsonAppendChar(pOut, '-'); + k++; + } + if( zIn[k]=='.' ){ + jsonAppendChar(pOut, '0'); } + for(; kaBlob[i+n], sz); + jsonAppendChar(pOut, '"'); break; } + case JSONB_TEXT5: { + const char *zIn; + u32 k; + u32 sz2 = sz; + zIn = (const char*)&pParse->aBlob[i+n]; + jsonAppendChar(pOut, '"'); + while( sz2>0 ){ + for(k=0; k0 ){ + jsonAppendRawNZ(pOut, zIn, k); + if( k>=sz2 ){ + break; + } + zIn += k; + sz2 -= k; + } + if( zIn[0]=='"' ){ + jsonAppendRawNZ(pOut, "\\\"", 2); + zIn++; + sz2--; + continue; + } + assert( zIn[0]=='\\' ); + assert( sz2>=1 ); + if( sz2<2 ){ + pOut->eErr |= JSTRING_MALFORMED; + break; + } + switch( (u8)zIn[1] ){ + case '\'': + jsonAppendChar(pOut, '\''); + break; + case 'v': + jsonAppendRawNZ(pOut, "\\u0009", 6); + break; + case 'x': + if( sz2<4 ){ + pOut->eErr |= JSTRING_MALFORMED; + sz2 = 2; + break; + } + jsonAppendRawNZ(pOut, "\\u00", 4); + jsonAppendRawNZ(pOut, &zIn[2], 2); + zIn += 2; + sz2 -= 2; + break; + case '0': + jsonAppendRawNZ(pOut, "\\u0000", 6); + break; + case '\r': + if( sz2>2 && zIn[2]=='\n' ){ + zIn++; + sz2--; + } + break; + case '\n': + break; + case 0xe2: + /* '\' followed by either U+2028 or U+2029 is ignored as + ** whitespace. Not that in UTF8, U+2028 is 0xe2 0x80 0x29. 
+ ** U+2029 is the same except for the last byte */ + if( sz2<4 + || 0x80!=(u8)zIn[2] + || (0xa8!=(u8)zIn[3] && 0xa9!=(u8)zIn[3]) + ){ + pOut->eErr |= JSTRING_MALFORMED; + sz2 = 2; + break; + } + zIn += 2; + sz2 -= 2; + break; + default: + jsonAppendRawNZ(pOut, zIn, 2); + break; + } + assert( sz2>=2 ); + zIn += 2; + sz2 -= 2; + } + jsonAppendChar(pOut, '"'); + break; + } + case JSONB_TEXTRAW: { + jsonAppendString(pOut, (const char*)&pParse->aBlob[i+n], sz); + break; + } + case JSONB_ARRAY: { + jsonAppendChar(pOut, '['); + j = i+n; + iEnd = j+sz; + while( jeErr==0 ){ + j = jsonTranslateBlobToText(pParse, j, pOut); + jsonAppendChar(pOut, ','); + } + if( j>iEnd ) pOut->eErr |= JSTRING_MALFORMED; + if( sz>0 ) jsonStringTrimOneChar(pOut); + jsonAppendChar(pOut, ']'); + break; + } + case JSONB_OBJECT: { + int x = 0; + jsonAppendChar(pOut, '{'); + j = i+n; + iEnd = j+sz; + while( jeErr==0 ){ + j = jsonTranslateBlobToText(pParse, j, pOut); + jsonAppendChar(pOut, (x++ & 1) ? ',' : ':'); + } + if( (x & 1)!=0 || j>iEnd ) pOut->eErr |= JSTRING_MALFORMED; + if( sz>0 ) jsonStringTrimOneChar(pOut); + jsonAppendChar(pOut, '}'); + break; + } + default: { + malformed_jsonb: + pOut->eErr |= JSTRING_MALFORMED; break; } } + return i+n+sz; +} + +/* Return true if the input pJson +** +** For performance reasons, this routine does not do a detailed check of the +** input BLOB to ensure that it is well-formed. Hence, false positives are +** possible. False negatives should never occur, however. +*/ +static int jsonFuncArgMightBeBinary(sqlite3_value *pJson){ + u32 sz, n; + const u8 *aBlob; + int nBlob; + JsonParse s; + if( sqlite3_value_type(pJson)!=SQLITE_BLOB ) return 0; + aBlob = sqlite3_value_blob(pJson); + nBlob = sqlite3_value_bytes(pJson); + if( nBlob<1 ) return 0; + if( NEVER(aBlob==0) || (aBlob[0] & 0x0f)>JSONB_OBJECT ) return 0; + memset(&s, 0, sizeof(s)); + s.aBlob = (u8*)aBlob; + s.nBlob = nBlob; + n = jsonbPayloadSize(&s, 0, &sz); + if( n==0 ) return 0; + if( sz+n!=(u32)nBlob ) return 0; + if( (aBlob[0] & 0x0f)<=JSONB_FALSE && sz>0 ) return 0; + return sz+n==(u32)nBlob; } /* -** Compute the parentage of all nodes in a completed parse. +** Given that a JSONB_ARRAY object starts at offset i, return +** the number of entries in that array. */ -static int jsonParseFindParents(JsonParse *pParse){ - u32 *aUp; - assert( pParse->aUp==0 ); - aUp = pParse->aUp = sqlite3_malloc64( sizeof(u32)*pParse->nNode ); - if( aUp==0 ){ - pParse->oom = 1; - return SQLITE_NOMEM; +static u32 jsonbArrayCount(JsonParse *pParse, u32 iRoot){ + u32 n, sz, i, iEnd; + u32 k = 0; + n = jsonbPayloadSize(pParse, iRoot, &sz); + iEnd = iRoot+n+sz; + for(i=iRoot+n; n>0 && idelta. */ -#define JSON_CACHE_ID (-429938) /* First cache entry */ -#define JSON_CACHE_SZ 4 /* Max number of cache entries */ +static void jsonAfterEditSizeAdjust(JsonParse *pParse, u32 iRoot){ + u32 sz = 0; + u32 nBlob; + assert( pParse->delta!=0 ); + assert( pParse->nBlobAlloc >= pParse->nBlob ); + nBlob = pParse->nBlob; + pParse->nBlob = pParse->nBlobAlloc; + (void)jsonbPayloadSize(pParse, iRoot, &sz); + pParse->nBlob = nBlob; + sz += pParse->delta; + pParse->delta += jsonBlobChangePayloadSize(pParse, iRoot, sz); +} /* -** Obtain a complete parse of the JSON found in the first argument -** of the argv array. Use the sqlite3_get_auxdata() cache for this -** parse if it is available. 
If the cache is not available or if it -** is no longer valid, parse the JSON again and return the new parse, -** and also register the new parse so that it will be available for -** future sqlite3_get_auxdata() calls. +** Modify the JSONB blob at pParse->aBlob by removing nDel bytes of +** content beginning at iDel, and replacing them with nIns bytes of +** content given by aIns. +** +** nDel may be zero, in which case no bytes are removed. But iDel is +** still important as new bytes will be insert beginning at iDel. +** +** aIns may be zero, in which case space is created to hold nIns bytes +** beginning at iDel, but that space is uninitialized. +** +** Set pParse->oom if an OOM occurs. */ -static JsonParse *jsonParseCached( - sqlite3_context *pCtx, - sqlite3_value **argv, - sqlite3_context *pErrCtx +static void jsonBlobEdit( + JsonParse *pParse, /* The JSONB to be modified is in pParse->aBlob */ + u32 iDel, /* First byte to be removed */ + u32 nDel, /* Number of bytes to remove */ + const u8 *aIns, /* Content to insert */ + u32 nIns /* Bytes of content to insert */ ){ - const char *zJson = (const char*)sqlite3_value_text(argv[0]); - int nJson = sqlite3_value_bytes(argv[0]); - JsonParse *p; - JsonParse *pMatch = 0; - int iKey; - int iMinKey = 0; - u32 iMinHold = 0xffffffff; - u32 iMaxHold = 0; - if( zJson==0 ) return 0; - for(iKey=0; iKeynBlob + d > pParse->nBlobAlloc ){ + jsonBlobExpand(pParse, pParse->nBlob+d); + if( pParse->oom ) return; } - if( pMatch==0 - && p->nJson==nJson - && memcmp(p->zJson,zJson,nJson)==0 - ){ - p->nErr = 0; - pMatch = p; - }else if( p->iHoldiHold; - iMinKey = iKey; + memmove(&pParse->aBlob[iDel+nIns], + &pParse->aBlob[iDel+nDel], + pParse->nBlob - (iDel+nDel)); + pParse->nBlob += d; + pParse->delta += d; + } + if( nIns && aIns ) memcpy(&pParse->aBlob[iDel], aIns, nIns); +} + +/* +** Return the number of escaped newlines to be ignored. +** An escaped newline is a one of the following byte sequences: +** +** 0x5c 0x0a +** 0x5c 0x0d +** 0x5c 0x0d 0x0a +** 0x5c 0xe2 0x80 0xa8 +** 0x5c 0xe2 0x80 0xa9 +*/ +static u32 jsonBytesToBypass(const char *z, u32 n){ + u32 i = 0; + while( i+1iHold>iMaxHold ){ - iMaxHold = p->iHold; + if( 0xe2==(u8)z[i+1] + && i+3nErr = 0; - pMatch->iHold = iMaxHold+1; - return pMatch; + return i; +} + +/* +** Input z[0..n] defines JSON escape sequence including the leading '\\'. +** Decode that escape sequence into a single character. Write that +** character into *piOut. Return the number of bytes in the escape sequence. +** +** If there is a syntax error of some kind (for example too few characters +** after the '\\' to complete the encoding) then *piOut is set to +** JSON_INVALID_CHAR. 
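+**
+** For example, the six bytes "\u0041" decode to 'A' (0x41) with a
+** return value of 6, and the surrogate pair "\ud834\udd1e" decodes
+** to the single code point U+1D11E with a return value of 12.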
+*/ +static u32 jsonUnescapeOneChar(const char *z, u32 n, u32 *piOut){ + assert( n>0 ); + assert( z[0]=='\\' ); + if( n<2 ){ + *piOut = JSON_INVALID_CHAR; + return n; } - p = sqlite3_malloc64( sizeof(*p) + nJson + 1 ); - if( p==0 ){ - sqlite3_result_error_nomem(pCtx); - return 0; + switch( (u8)z[1] ){ + case 'u': { + u32 v, vlo; + if( n<6 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + v = jsonHexToInt4(&z[2]); + if( (v & 0xfc00)==0xd800 + && n>=12 + && z[6]=='\\' + && z[7]=='u' + && ((vlo = jsonHexToInt4(&z[8]))&0xfc00)==0xdc00 + ){ + *piOut = ((v&0x3ff)<<10) + (vlo&0x3ff) + 0x10000; + return 12; + }else{ + *piOut = v; + return 6; + } + } + case 'b': { *piOut = '\b'; return 2; } + case 'f': { *piOut = '\f'; return 2; } + case 'n': { *piOut = '\n'; return 2; } + case 'r': { *piOut = '\r'; return 2; } + case 't': { *piOut = '\t'; return 2; } + case 'v': { *piOut = '\v'; return 2; } + case '0': { *piOut = 0; return 2; } + case '\'': + case '"': + case '/': + case '\\':{ *piOut = z[1]; return 2; } + case 'x': { + if( n<4 ){ + *piOut = JSON_INVALID_CHAR; + return n; + } + *piOut = (jsonHexToInt(z[2])<<4) | jsonHexToInt(z[3]); + return 4; + } + case 0xe2: + case '\r': + case '\n': { + u32 nSkip = jsonBytesToBypass(z, n); + if( nSkip==0 ){ + *piOut = JSON_INVALID_CHAR; + return n; + }else if( nSkip==n ){ + *piOut = 0; + return n; + }else if( z[nSkip]=='\\' ){ + return nSkip + jsonUnescapeOneChar(&z[nSkip], n-nSkip, piOut); + }else{ + int sz = sqlite3Utf8ReadLimited((u8*)&z[nSkip], n-nSkip, piOut); + return nSkip + sz; + } + } + default: { + *piOut = JSON_INVALID_CHAR; + return 2; + } } - memset(p, 0, sizeof(*p)); - p->zJson = (char*)&p[1]; - memcpy((char*)p->zJson, zJson, nJson+1); - if( jsonParse(p, pErrCtx, p->zJson) ){ - sqlite3_free(p); - return 0; +} + + +/* +** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. +** +** In this version, we know that one or the other or both of the +** two comparands contains an escape sequence. +*/ +static SQLITE_NOINLINE int jsonLabelCompareEscaped( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + u32 cLeft, cRight; + assert( rawLeft==0 || rawRight==0 ); + while( 1 /*exit-by-return*/ ){ + if( nLeft==0 ){ + cLeft = 0; + }else if( rawLeft || zLeft[0]!='\\' ){ + cLeft = ((u8*)zLeft)[0]; + if( cLeft>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zLeft, nLeft, &cLeft); + zLeft += sz; + nLeft -= sz; + }else{ + zLeft++; + nLeft--; + } + }else{ + u32 n = jsonUnescapeOneChar(zLeft, nLeft, &cLeft); + zLeft += n; + assert( n<=nLeft ); + nLeft -= n; + } + if( nRight==0 ){ + cRight = 0; + }else if( rawRight || zRight[0]!='\\' ){ + cRight = ((u8*)zRight)[0]; + if( cRight>=0xc0 ){ + int sz = sqlite3Utf8ReadLimited((u8*)zRight, nRight, &cRight); + zRight += sz; + nRight -= sz; + }else{ + zRight++; + nRight--; + } + }else{ + u32 n = jsonUnescapeOneChar(zRight, nRight, &cRight); + zRight += n; + assert( n<=nRight ); + nRight -= n; + } + if( cLeft!=cRight ) return 0; + if( cLeft==0 ) return 1; } - p->nJson = nJson; - p->iHold = iMaxHold+1; - sqlite3_set_auxdata(pCtx, JSON_CACHE_ID+iMinKey, p, - (void(*)(void*))jsonParseFree); - return (JsonParse*)sqlite3_get_auxdata(pCtx, JSON_CACHE_ID+iMinKey); } /* -** Compare the OBJECT label at pNode against zKey,nKey. Return true on -** a match. 
+** Compare two object labels. Return 1 if they are equal and +** 0 if they differ. Return -1 if an OOM occurs. */ -static int jsonLabelCompare(JsonNode *pNode, const char *zKey, u32 nKey){ - assert( pNode->eU==1 ); - if( pNode->jnFlags & JNODE_RAW ){ - if( pNode->n!=nKey ) return 0; - return strncmp(pNode->u.zJContent, zKey, nKey)==0; +static int jsonLabelCompare( + const char *zLeft, /* The left label */ + u32 nLeft, /* Size of the left label in bytes */ + int rawLeft, /* True if zLeft contains no escapes */ + const char *zRight, /* The right label */ + u32 nRight, /* Size of the right label in bytes */ + int rawRight /* True if zRight is escape-free */ +){ + if( rawLeft && rawRight ){ + /* Simpliest case: Neither label contains escapes. A simple + ** memcmp() is sufficient. */ + if( nLeft!=nRight ) return 0; + return memcmp(zLeft, zRight, nLeft)==0; }else{ - if( pNode->n!=nKey+2 ) return 0; - return strncmp(pNode->u.zJContent+1, zKey, nKey)==0; + return jsonLabelCompareEscaped(zLeft, nLeft, rawLeft, + zRight, nRight, rawRight); } } -/* forward declaration */ -static JsonNode *jsonLookupAppend(JsonParse*,const char*,int*,const char**); +/* +** Error returns from jsonLookupStep() +*/ +#define JSON_LOOKUP_ERROR 0xffffffff +#define JSON_LOOKUP_NOTFOUND 0xfffffffe +#define JSON_LOOKUP_PATHERROR 0xfffffffd +#define JSON_LOOKUP_ISERROR(x) ((x)>=JSON_LOOKUP_PATHERROR) + +/* Forward declaration */ +static u32 jsonLookupStep(JsonParse*,u32,const char*,u32); + + +/* This helper routine for jsonLookupStep() populates pIns with +** binary data that is to be inserted into pParse. +** +** In the common case, pIns just points to pParse->aIns and pParse->nIns. +** But if the zPath of the original edit operation includes path elements +** that go deeper, additional substructure must be created. +** +** For example: +** +** json_insert('{}', '$.a.b.c', 123); +** +** The search stops at '$.a' But additional substructure must be +** created for the ".b.c" part of the patch so that the final result +** is: {"a":{"b":{"c"::123}}}. This routine populates pIns with +** the binary equivalent of {"b":{"c":123}} so that it can be inserted. +** +** The caller is responsible for resetting pIns when it has finished +** using the substructure. +*/ +static u32 jsonCreateEditSubstructure( + JsonParse *pParse, /* The original JSONB that is being edited */ + JsonParse *pIns, /* Populate this with the blob data to insert */ + const char *zTail /* Tail of the path that determins substructure */ +){ + static const u8 emptyObject[] = { JSONB_ARRAY, JSONB_OBJECT }; + int rc; + memset(pIns, 0, sizeof(*pIns)); + pIns->db = pParse->db; + if( zTail[0]==0 ){ + /* No substructure. Just insert what is given in pParse. */ + pIns->aBlob = pParse->aIns; + pIns->nBlob = pParse->nIns; + rc = 0; + }else{ + /* Construct the binary substructure */ + pIns->nBlob = 1; + pIns->aBlob = (u8*)&emptyObject[zTail[0]=='.']; + pIns->eEdit = pParse->eEdit; + pIns->nIns = pParse->nIns; + pIns->aIns = pParse->aIns; + rc = jsonLookupStep(pIns, 0, zTail, 0); + pParse->oom |= pIns->oom; + } + return rc; /* Error code only */ +} /* -** Search along zPath to find the node specified. Return a pointer -** to that node, or NULL if zPath is malformed or if there is no such -** node. +** Search along zPath to find the Json element specified. Return an +** index into pParse->aBlob[] for the start of that element's value. 
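+**
+** For example, given a path such as ".phones[1]", the search first
+** locates the object entry whose label is "phones" and then descends
+** into element 1 of the array found there.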
+** +** If the value found by this routine is the value half of label/value pair +** within an object, then set pPath->iLabel to the start of the corresponding +** label, before returning. ** -** If pApnd!=0, then try to append new nodes to complete zPath if it is -** possible to do so and if no existing node corresponds to zPath. If -** new nodes are appended *pApnd is set to 1. +** Return one of the JSON_LOOKUP error codes if problems are seen. +** +** This routine will also modify the blob. If pParse->eEdit is one of +** JEDIT_DEL, JEDIT_REPL, JEDIT_INS, or JEDIT_SET, then changes might be +** made to the selected value. If an edit is performed, then the return +** value does not necessarily point to the select element. If an edit +** is performed, the return value is only useful for detecting error +** conditions. */ -static JsonNode *jsonLookupStep( +static u32 jsonLookupStep( JsonParse *pParse, /* The JSON to search */ - u32 iRoot, /* Begin the search at this node */ + u32 iRoot, /* Begin the search at this element of aBlob[] */ const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - const char **pzErr /* Make *pzErr point to any syntax error in zPath */ + u32 iLabel /* Label if iRoot is a value of in an object */ ){ - u32 i, j, nKey; + u32 i, j, k, nKey, sz, n, iEnd, rc; const char *zKey; - JsonNode *pRoot = &pParse->aNode[iRoot]; - if( zPath[0]==0 ) return pRoot; - if( pRoot->jnFlags & JNODE_REPLACE ) return 0; + u8 x; + + if( zPath[0]==0 ){ + if( pParse->eEdit && jsonBlobMakeEditable(pParse, pParse->nIns) ){ + n = jsonbPayloadSize(pParse, iRoot, &sz); + sz += n; + if( pParse->eEdit==JEDIT_DEL ){ + if( iLabel>0 ){ + sz += iRoot - iLabel; + iRoot = iLabel; + } + jsonBlobEdit(pParse, iRoot, sz, 0, 0); + }else if( pParse->eEdit==JEDIT_INS ){ + /* Already exists, so json_insert() is a no-op */ + }else{ + /* json_set() or json_replace() */ + jsonBlobEdit(pParse, iRoot, sz, pParse->aIns, pParse->nIns); + } + } + pParse->iLabel = iLabel; + return iRoot; + } if( zPath[0]=='.' ){ - if( pRoot->eType!=JSON_OBJECT ) return 0; + int rawKey = 1; + x = pParse->aBlob[iRoot]; zPath++; if( zPath[0]=='"' ){ zKey = zPath + 1; @@ -201967,303 +211661,850 @@ static JsonNode *jsonLookupStep( if( zPath[i] ){ i++; }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } testcase( nKey==0 ); + rawKey = memchr(zKey, '\\', nKey)==0; }else{ zKey = zPath; for(i=0; zPath[i] && zPath[i]!='.' 
&& zPath[i]!='['; i++){} nKey = i; if( nKey==0 ){ - *pzErr = zPath; - return 0; - } - } - j = 1; - for(;;){ - while( j<=pRoot->n ){ - if( jsonLabelCompare(pRoot+j, zKey, nKey) ){ - return jsonLookupStep(pParse, iRoot+j+1, &zPath[i], pApnd, pzErr); - } - j++; - j += jsonNodeSize(&pRoot[j]); + return JSON_LOOKUP_PATHERROR; + } + } + if( (x & 0x0f)!=JSONB_OBJECT ) return JSON_LOOKUP_NOTFOUND; + n = jsonbPayloadSize(pParse, iRoot, &sz); + j = iRoot + n; /* j is the index of a label */ + iEnd = j+sz; + while( jaBlob[j] & 0x0f; + if( xJSONB_TEXTRAW ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + k = j+n; /* k is the index of the label text */ + if( k+sz>=iEnd ) return JSON_LOOKUP_ERROR; + zLabel = (const char*)&pParse->aBlob[k]; + rawLabel = x==JSONB_TEXT || x==JSONB_TEXTRAW; + if( jsonLabelCompare(zKey, nKey, rawKey, zLabel, sz, rawLabel) ){ + u32 v = k+sz; /* v is the index of the value */ + if( ((pParse->aBlob[v])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, v, &sz); + if( n==0 || v+n+sz>iEnd ) return JSON_LOOKUP_ERROR; + assert( j>0 ); + rc = jsonLookupStep(pParse, v, &zPath[i], j); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - assert( pRoot->eU==2 ); - iRoot += pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( pApnd ){ - u32 iStart, iLabel; - JsonNode *pNode; - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); - iLabel = jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - zPath += i; - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart - iRoot; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); - pParse->aNode[iLabel].jnFlags |= JNODE_RAW; - } - return pNode; + j = k+sz; + if( ((pParse->aBlob[j])&0x0f)>JSONB_OBJECT ) return JSON_LOOKUP_ERROR; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( pParse->eEdit>=JEDIT_INS ){ + u32 nIns; /* Total bytes to insert (label+value) */ + JsonParse v; /* BLOB encoding of the value to be inserted */ + JsonParse ix; /* Header of the label to be inserted */ + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + memset(&ix, 0, sizeof(ix)); + ix.db = pParse->db; + jsonBlobAppendNode(&ix, rawKey?JSONB_TEXTRAW:JSONB_TEXT5, nKey, 0); + pParse->oom |= ix.oom; + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, ix.nBlob+nKey+v.nBlob) + ){ + assert( !pParse->oom ); + nIns = ix.nBlob + nKey + v.nBlob; + jsonBlobEdit(pParse, j, 0, 0, nIns); + if( !pParse->oom ){ + assert( pParse->aBlob!=0 ); /* Because pParse->oom!=0 */ + assert( ix.aBlob!=0 ); /* Because pPasre->oom!=0 */ + memcpy(&pParse->aBlob[j], ix.aBlob, ix.nBlob); + k = j + ix.nBlob; + memcpy(&pParse->aBlob[k], zKey, nKey); + k += nKey; + memcpy(&pParse->aBlob[k], v.aBlob, v.nBlob); + if( ALWAYS(pParse->delta) ) jsonAfterEditSizeAdjust(pParse, iRoot); + } + } + jsonParseReset(&v); + jsonParseReset(&ix); + return rc; } }else if( zPath[0]=='[' ){ - i = 0; - j = 1; - while( sqlite3Isdigit(zPath[j]) ){ - i = i*10 + zPath[j] - '0'; - j++; + x = pParse->aBlob[iRoot] & 0x0f; + if( x!=JSONB_ARRAY ) return JSON_LOOKUP_NOTFOUND; + n = jsonbPayloadSize(pParse, iRoot, &sz); + k = 0; + i = 1; + 
while( sqlite3Isdigit(zPath[i]) ){ + k = k*10 + zPath[i] - '0'; + i++; } - if( j<2 || zPath[j]!=']' ){ + if( i<2 || zPath[i]!=']' ){ if( zPath[1]=='#' ){ - JsonNode *pBase = pRoot; - int iBase = iRoot; - if( pRoot->eType!=JSON_ARRAY ) return 0; - for(;;){ - while( j<=pBase->n ){ - if( (pBase[j].jnFlags & JNODE_REMOVE)==0 ) i++; - j += jsonNodeSize(&pBase[j]); - } - if( (pBase->jnFlags & JNODE_APPEND)==0 ) break; - assert( pBase->eU==2 ); - iBase += pBase->u.iAppend; - pBase = &pParse->aNode[iBase]; - j = 1; - } - j = 2; + k = jsonbArrayCount(pParse, iRoot); + i = 2; if( zPath[2]=='-' && sqlite3Isdigit(zPath[3]) ){ - unsigned int x = 0; - j = 3; + unsigned int nn = 0; + i = 3; do{ - x = x*10 + zPath[j] - '0'; - j++; - }while( sqlite3Isdigit(zPath[j]) ); - if( x>i ) return 0; - i -= x; + nn = nn*10 + zPath[i] - '0'; + i++; + }while( sqlite3Isdigit(zPath[i]) ); + if( nn>k ) return JSON_LOOKUP_NOTFOUND; + k -= nn; } - if( zPath[j]!=']' ){ - *pzErr = zPath; - return 0; + if( zPath[i]!=']' ){ + return JSON_LOOKUP_PATHERROR; } }else{ - *pzErr = zPath; - return 0; + return JSON_LOOKUP_PATHERROR; } } - if( pRoot->eType!=JSON_ARRAY ) return 0; - zPath += j + 1; - j = 1; - for(;;){ - while( j<=pRoot->n && (i>0 || (pRoot[j].jnFlags & JNODE_REMOVE)!=0) ){ - if( (pRoot[j].jnFlags & JNODE_REMOVE)==0 ) i--; - j += jsonNodeSize(&pRoot[j]); - } - if( (pRoot->jnFlags & JNODE_APPEND)==0 ) break; - assert( pRoot->eU==2 ); - iRoot += pRoot->u.iAppend; - pRoot = &pParse->aNode[iRoot]; - j = 1; - } - if( j<=pRoot->n ){ - return jsonLookupStep(pParse, iRoot+j, zPath, pApnd, pzErr); - } - if( i==0 && pApnd ){ - u32 iStart; - JsonNode *pNode; - iStart = jsonParseAddNode(pParse, JSON_ARRAY, 1, 0); - pNode = jsonLookupAppend(pParse, zPath, pApnd, pzErr); - if( pParse->oom ) return 0; - if( pNode ){ - pRoot = &pParse->aNode[iRoot]; - assert( pRoot->eU==0 ); - pRoot->u.iAppend = iStart - iRoot; - pRoot->jnFlags |= JNODE_APPEND; - VVA( pRoot->eU = 2 ); + j = iRoot+n; + iEnd = j+sz; + while( jdelta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } - return pNode; + k--; + n = jsonbPayloadSize(pParse, j, &sz); + if( n==0 ) return JSON_LOOKUP_ERROR; + j += n+sz; + } + if( j>iEnd ) return JSON_LOOKUP_ERROR; + if( k>0 ) return JSON_LOOKUP_NOTFOUND; + if( pParse->eEdit>=JEDIT_INS ){ + JsonParse v; + testcase( pParse->eEdit==JEDIT_INS ); + testcase( pParse->eEdit==JEDIT_SET ); + rc = jsonCreateEditSubstructure(pParse, &v, &zPath[i+1]); + if( !JSON_LOOKUP_ISERROR(rc) + && jsonBlobMakeEditable(pParse, v.nBlob) + ){ + assert( !pParse->oom ); + jsonBlobEdit(pParse, j, 0, v.aBlob, v.nBlob); + } + jsonParseReset(&v); + if( pParse->delta ) jsonAfterEditSizeAdjust(pParse, iRoot); + return rc; } }else{ - *pzErr = zPath; + return JSON_LOOKUP_PATHERROR; } - return 0; + return JSON_LOOKUP_NOTFOUND; } /* -** Append content to pParse that will complete zPath. Return a pointer -** to the inserted node, or return NULL if the append fails. +** Convert a JSON BLOB into text and make that text the return value +** of an SQL function. */ -static JsonNode *jsonLookupAppend( - JsonParse *pParse, /* Append content to the JSON parse */ - const char *zPath, /* Description of content to append */ - int *pApnd, /* Set this flag to 1 */ - const char **pzErr /* Make this point to any syntax error */ +static void jsonReturnTextJsonFromBlob( + sqlite3_context *ctx, + const u8 *aBlob, + u32 nBlob ){ - *pApnd = 1; - if( zPath[0]==0 ){ - jsonParseAddNode(pParse, JSON_NULL, 0, 0); - return pParse->oom ? 
0 : &pParse->aNode[pParse->nNode-1]; - } - if( zPath[0]=='.' ){ - jsonParseAddNode(pParse, JSON_OBJECT, 0, 0); - }else if( strncmp(zPath,"[0]",3)==0 ){ - jsonParseAddNode(pParse, JSON_ARRAY, 0, 0); - }else{ - return 0; - } - if( pParse->oom ) return 0; - return jsonLookupStep(pParse, pParse->nNode-1, zPath, pApnd, pzErr); + JsonParse x; + JsonString s; + + if( NEVER(aBlob==0) ) return; + memset(&x, 0, sizeof(x)); + x.aBlob = (u8*)aBlob; + x.nBlob = nBlob; + jsonStringInit(&s, ctx); + jsonTranslateBlobToText(&x, 0, &s); + jsonReturnString(&s, 0, 0); } + /* -** Return the text of a syntax error message on a JSON path. Space is -** obtained from sqlite3_malloc(). +** Return the value of the BLOB node at index i. +** +** If the value is a primitive, return it as an SQL value. +** If the value is an array or object, return it as either +** JSON text or the BLOB encoding, depending on the JSON_B flag +** on the userdata. */ -static char *jsonPathSyntaxError(const char *zErr){ - return sqlite3_mprintf("JSON path error near '%q'", zErr); +static void jsonReturnFromBlob( + JsonParse *pParse, /* Complete JSON parse tree */ + u32 i, /* Index of the node */ + sqlite3_context *pCtx, /* Return value for this function */ + int textOnly /* return text JSON. Disregard user-data */ +){ + u32 n, sz; + int rc; + sqlite3 *db = sqlite3_context_db_handle(pCtx); + + n = jsonbPayloadSize(pParse, i, &sz); + if( n==0 ){ + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; + } + switch( pParse->aBlob[i] & 0x0f ){ + case JSONB_NULL: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_null(pCtx); + break; + } + case JSONB_TRUE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 1); + break; + } + case JSONB_FALSE: { + if( sz ) goto returnfromblob_malformed; + sqlite3_result_int(pCtx, 0); + break; + } + case JSONB_INT5: + case JSONB_INT: { + sqlite3_int64 iRes = 0; + char *z; + int bNeg = 0; + char x; + if( sz==0 ) goto returnfromblob_malformed; + x = (char)pParse->aBlob[i+n]; + if( x=='-' ){ + if( sz<2 ) goto returnfromblob_malformed; + n++; + sz--; + bNeg = 1; + } + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3DecOrHexToI64(z, &iRes); + sqlite3DbFree(db, z); + if( rc==0 ){ + sqlite3_result_int64(pCtx, bNeg ? 
-iRes : iRes); + }else if( rc==3 && bNeg ){ + sqlite3_result_int64(pCtx, SMALLEST_INT64); + }else if( rc==1 ){ + goto returnfromblob_malformed; + }else{ + if( bNeg ){ n--; sz++; } + goto to_double; + } + break; + } + case JSONB_FLOAT5: + case JSONB_FLOAT: { + double r; + char *z; + if( sz==0 ) goto returnfromblob_malformed; + to_double: + z = sqlite3DbStrNDup(db, (const char*)&pParse->aBlob[i+n], (int)sz); + if( z==0 ) goto returnfromblob_oom; + rc = sqlite3AtoF(z, &r, sqlite3Strlen30(z), SQLITE_UTF8); + sqlite3DbFree(db, z); + if( rc<=0 ) goto returnfromblob_malformed; + sqlite3_result_double(pCtx, r); + break; + } + case JSONB_TEXTRAW: + case JSONB_TEXT: { + sqlite3_result_text(pCtx, (char*)&pParse->aBlob[i+n], sz, + SQLITE_TRANSIENT); + break; + } + case JSONB_TEXT5: + case JSONB_TEXTJ: { + /* Translate JSON formatted string into raw text */ + u32 iIn, iOut; + const char *z; + char *zOut; + u32 nOut = sz; + z = (const char*)&pParse->aBlob[i+n]; + zOut = sqlite3DbMallocRaw(db, nOut+1); + if( zOut==0 ) goto returnfromblob_oom; + for(iIn=iOut=0; iIn=2 ); + zOut[iOut++] = (char)(0xc0 | (v>>6)); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v<0x10000 ){ + assert( szEscape>=3 ); + zOut[iOut++] = 0xe0 | (v>>12); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + }else if( v==JSON_INVALID_CHAR ){ + /* Silently ignore illegal unicode */ + }else{ + assert( szEscape>=4 ); + zOut[iOut++] = 0xf0 | (v>>18); + zOut[iOut++] = 0x80 | ((v>>12)&0x3f); + zOut[iOut++] = 0x80 | ((v>>6)&0x3f); + zOut[iOut++] = 0x80 | (v&0x3f); + } + iIn += szEscape - 1; + }else{ + zOut[iOut++] = c; + } + } /* end for() */ + assert( iOut<=nOut ); + zOut[iOut] = 0; + sqlite3_result_text(pCtx, zOut, iOut, SQLITE_DYNAMIC); + break; + } + case JSONB_ARRAY: + case JSONB_OBJECT: { + int flags = textOnly ? 0 : SQLITE_PTR_TO_INT(sqlite3_user_data(pCtx)); + if( flags & JSON_BLOB ){ + sqlite3_result_blob(pCtx, &pParse->aBlob[i], sz+n, SQLITE_TRANSIENT); + }else{ + jsonReturnTextJsonFromBlob(pCtx, &pParse->aBlob[i], sz+n); + } + break; + } + default: { + goto returnfromblob_malformed; + } + } + return; + +returnfromblob_oom: + sqlite3_result_error_nomem(pCtx); + return; + +returnfromblob_malformed: + sqlite3_result_error(pCtx, "malformed JSON", -1); + return; } /* -** Do a node lookup using zPath. Return a pointer to the node on success. -** Return NULL if not found or if there is an error. +** pArg is a function argument that might be an SQL value or a JSON +** value. Figure out what it is and encode it as a JSONB blob. +** Return the results in pParse. ** -** On an error, write an error message into pCtx and increment the -** pParse->nErr counter. +** pParse is uninitialized upon entry. This routine will handle the +** initialization of pParse. The result will be contained in +** pParse->aBlob and pParse->nBlob. pParse->aBlob might be dynamically +** allocated (if pParse->nBlobAlloc is greater than zero) in which case +** the caller is responsible for freeing the space allocated to pParse->aBlob +** when it has finished with it. Or pParse->aBlob might be a static string +** or a value obtained from sqlite3_value_blob(pArg). ** -** If pApnd!=NULL then try to append missing nodes and set *pApnd = 1 if -** nodes are appended. +** If the argument is a BLOB that is clearly not a JSONB, then this +** function might set an error message in ctx and return non-zero. +** It might also set an error message and return non-zero on an OOM error. 
*/ -static JsonNode *jsonLookup( - JsonParse *pParse, /* The JSON to search */ - const char *zPath, /* The path to search */ - int *pApnd, /* Append nodes to complete path if not NULL */ - sqlite3_context *pCtx /* Report errors here, if not NULL */ -){ - const char *zErr = 0; - JsonNode *pNode = 0; - char *zMsg; - - if( zPath==0 ) return 0; - if( zPath[0]!='$' ){ - zErr = zPath; - goto lookup_err; +static int jsonFunctionArgToBlob( + sqlite3_context *ctx, + sqlite3_value *pArg, + JsonParse *pParse +){ + int eType = sqlite3_value_type(pArg); + static u8 aNull[] = { 0x00 }; + memset(pParse, 0, sizeof(pParse[0])); + pParse->db = sqlite3_context_db_handle(ctx); + switch( eType ){ + default: { + pParse->aBlob = aNull; + pParse->nBlob = 1; + return 0; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(pArg) ){ + pParse->aBlob = (u8*)sqlite3_value_blob(pArg); + pParse->nBlob = sqlite3_value_bytes(pArg); + }else{ + sqlite3_result_error(ctx, "JSON cannot hold BLOB values", -1); + return 1; + } + break; + } + case SQLITE_TEXT: { + const char *zJson = (const char*)sqlite3_value_text(pArg); + int nJson = sqlite3_value_bytes(pArg); + if( zJson==0 ) return 1; + if( sqlite3_value_subtype(pArg)==JSON_SUBTYPE ){ + pParse->zJson = (char*)zJson; + pParse->nJson = nJson; + if( jsonConvertTextToBlob(pParse, ctx) ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + sqlite3DbFree(pParse->db, pParse->aBlob); + memset(pParse, 0, sizeof(pParse[0])); + return 1; + } + }else{ + jsonBlobAppendNode(pParse, JSONB_TEXTRAW, nJson, zJson); + } + break; + } + case SQLITE_FLOAT: { + double r = sqlite3_value_double(pArg); + if( NEVER(sqlite3IsNaN(r)) ){ + jsonBlobAppendNode(pParse, JSONB_NULL, 0, 0); + }else{ + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + if( z[0]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 5, "9e999"); + }else if( z[0]=='-' && z[1]=='I' ){ + jsonBlobAppendNode(pParse, JSONB_FLOAT, 6, "-9e999"); + }else{ + jsonBlobAppendNode(pParse, JSONB_FLOAT, n, z); + } + } + break; + } + case SQLITE_INTEGER: { + int n = sqlite3_value_bytes(pArg); + const char *z = (const char*)sqlite3_value_text(pArg); + if( z==0 ) return 1; + jsonBlobAppendNode(pParse, JSONB_INT, n, z); + break; + } + } + if( pParse->oom ){ + sqlite3_result_error_nomem(ctx); + return 1; + }else{ + return 0; } - zPath++; - pNode = jsonLookupStep(pParse, 0, zPath, pApnd, &zErr); - if( zErr==0 ) return pNode; +} -lookup_err: - pParse->nErr++; - assert( zErr!=0 && pCtx!=0 ); - zMsg = jsonPathSyntaxError(zErr); +/* +** Generate a bad path error. +** +** If ctx is not NULL then push the error message into ctx and return NULL. +** If ctx is NULL, then return the text of the error message. +*/ +static char *jsonBadPathError( + sqlite3_context *ctx, /* The function call containing the error */ + const char *zPath /* The path with the problem */ +){ + char *zMsg = sqlite3_mprintf("bad JSON path: %Q", zPath); + if( ctx==0 ) return zMsg; if( zMsg ){ - sqlite3_result_error(pCtx, zMsg, -1); + sqlite3_result_error(ctx, zMsg, -1); sqlite3_free(zMsg); }else{ - sqlite3_result_error_nomem(pCtx); + sqlite3_result_error_nomem(ctx); } return 0; } +/* argv[0] is a BLOB that seems likely to be a JSONB. Subsequent +** arguments come in parse where each pair contains a JSON path and +** content to insert or set at that patch. Do the updates +** and return the result. +** +** The specific operation is determined by eEdit, which can be one +** of JEDIT_INS, JEDIT_REPL, or JEDIT_SET. 
+*/ +static void jsonInsertIntoBlob( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv, + int eEdit /* JEDIT_INS, JEDIT_REPL, or JEDIT_SET */ +){ + int i; + u32 rc = 0; + const char *zPath = 0; + int flgs; + JsonParse *p; + JsonParse ax; + + assert( (argc&1)==1 ); + flgs = argc==1 ? 0 : JSON_EDITABLE; + p = jsonParseFuncArg(ctx, argv[0], flgs); + if( p==0 ) return; + for(i=1; inBlob, ax.aBlob, ax.nBlob); + } + rc = 0; + }else{ + p->eEdit = eEdit; + p->nIns = ax.nBlob; + p->aIns = ax.aBlob; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + } + jsonParseReset(&ax); + if( rc==JSON_LOOKUP_NOTFOUND ) continue; + if( JSON_LOOKUP_ISERROR(rc) ) goto jsonInsertIntoBlob_patherror; + } + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +jsonInsertIntoBlob_patherror: + jsonParseFree(p); + if( rc==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + }else{ + jsonBadPathError(ctx, zPath); + } + return; +} /* -** Report the wrong number of arguments for json_insert(), json_replace() -** or json_set(). +** If pArg is a blob that seems like a JSONB blob, then initialize +** p to point to that JSONB and return TRUE. If pArg does not seem like +** a JSONB blob, then return FALSE; +** +** This routine is only called if it is already known that pArg is a +** blob. The only open question is whether or not the blob appears +** to be a JSONB blob. */ -static void jsonWrongNumArgs( - sqlite3_context *pCtx, - const char *zFuncName -){ - char *zMsg = sqlite3_mprintf("json_%s() needs an odd number of arguments", - zFuncName); - sqlite3_result_error(pCtx, zMsg, -1); - sqlite3_free(zMsg); +static int jsonArgIsJsonb(sqlite3_value *pArg, JsonParse *p){ + u32 n, sz = 0; + p->aBlob = (u8*)sqlite3_value_blob(pArg); + p->nBlob = (u32)sqlite3_value_bytes(pArg); + if( p->nBlob==0 ){ + p->aBlob = 0; + return 0; + } + if( NEVER(p->aBlob==0) ){ + return 0; + } + if( (p->aBlob[0] & 0x0f)<=JSONB_OBJECT + && (n = jsonbPayloadSize(p, 0, &sz))>0 + && sz+n==p->nBlob + && ((p->aBlob[0] & 0x0f)>JSONB_FALSE || sz==0) + ){ + return 1; + } + p->aBlob = 0; + p->nBlob = 0; + return 0; } /* -** Mark all NULL entries in the Object passed in as JNODE_REMOVE. +** Generate a JsonParse object, containing valid JSONB in aBlob and nBlob, +** from the SQL function argument pArg. Return a pointer to the new +** JsonParse object. +** +** Ownership of the new JsonParse object is passed to the caller. The +** caller should invoke jsonParseFree() on the return value when it +** has finished using it. +** +** If any errors are detected, an appropriate error messages is set +** using sqlite3_result_error() or the equivalent and this routine +** returns NULL. This routine also returns NULL if the pArg argument +** is an SQL NULL value, but no error message is set in that case. This +** is so that SQL functions that are given NULL arguments will return +** a NULL value. 
*/ -static void jsonRemoveAllNulls(JsonNode *pNode){ - int i, n; - assert( pNode->eType==JSON_OBJECT ); - n = pNode->n; - for(i=2; i<=n; i += jsonNodeSize(&pNode[i])+1){ - switch( pNode[i].eType ){ - case JSON_NULL: - pNode[i].jnFlags |= JNODE_REMOVE; - break; - case JSON_OBJECT: - jsonRemoveAllNulls(&pNode[i]); - break; +static JsonParse *jsonParseFuncArg( + sqlite3_context *ctx, + sqlite3_value *pArg, + u32 flgs +){ + int eType; /* Datatype of pArg */ + JsonParse *p = 0; /* Value to be returned */ + JsonParse *pFromCache = 0; /* Value taken from cache */ + sqlite3 *db; /* The database connection */ + + assert( ctx!=0 ); + eType = sqlite3_value_type(pArg); + if( eType==SQLITE_NULL ){ + return 0; + } + pFromCache = jsonCacheSearch(ctx, pArg); + if( pFromCache ){ + pFromCache->nJPRef++; + if( (flgs & JSON_EDITABLE)==0 ){ + return pFromCache; } } -} + db = sqlite3_context_db_handle(ctx); +rebuild_from_cache: + p = sqlite3DbMallocZero(db, sizeof(*p)); + if( p==0 ) goto json_pfa_oom; + memset(p, 0, sizeof(*p)); + p->db = db; + p->nJPRef = 1; + if( pFromCache!=0 ){ + u32 nBlob = pFromCache->nBlob; + p->aBlob = sqlite3DbMallocRaw(db, nBlob); + if( p->aBlob==0 ) goto json_pfa_oom; + memcpy(p->aBlob, pFromCache->aBlob, nBlob); + p->nBlobAlloc = p->nBlob = nBlob; + p->hasNonstd = pFromCache->hasNonstd; + jsonParseFree(pFromCache); + return p; + } + if( eType==SQLITE_BLOB ){ + if( jsonArgIsJsonb(pArg,p) ){ + if( (flgs & JSON_EDITABLE)!=0 && jsonBlobMakeEditable(p, 0)==0 ){ + goto json_pfa_oom; + } + return p; + } + /* If the blob is not valid JSONB, fall through into trying to cast + ** the blob into text which is then interpreted as JSON. (tag-20240123-a) + ** + ** This goes against all historical documentation about how the SQLite + ** JSON functions were suppose to work. From the beginning, blob was + ** reserved for expansion and a blob value should have raised an error. + ** But it did not, due to a bug. And many applications came to depend + ** upon this buggy behavior, espeically when using the CLI and reading + ** JSON text using readfile(), which returns a blob. For this reason + ** we will continue to support the bug moving forward. + ** See for example https://sqlite.org/forum/forumpost/012136abd5292b8d + */ + } + p->zJson = (char*)sqlite3_value_text(pArg); + p->nJson = sqlite3_value_bytes(pArg); + if( db->mallocFailed ) goto json_pfa_oom; + if( p->nJson==0 ) goto json_pfa_malformed; + assert( p->zJson!=0 ); + if( jsonConvertTextToBlob(p, (flgs & JSON_KEEPERROR) ? 
0 : ctx) ){ + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + return 0; + } + }else{ + int isRCStr = sqlite3ValueIsOfClass(pArg, sqlite3RCStrUnref); + int rc; + if( !isRCStr ){ + char *zNew = sqlite3RCStrNew( p->nJson ); + if( zNew==0 ) goto json_pfa_oom; + memcpy(zNew, p->zJson, p->nJson); + p->zJson = zNew; + p->zJson[p->nJson] = 0; + }else{ + sqlite3RCStrRef(p->zJson); + } + p->bJsonIsRCStr = 1; + rc = jsonCacheInsert(ctx, p); + if( rc==SQLITE_NOMEM ) goto json_pfa_oom; + if( flgs & JSON_EDITABLE ){ + pFromCache = p; + p = 0; + goto rebuild_from_cache; + } + } + return p; +json_pfa_malformed: + if( flgs & JSON_KEEPERROR ){ + p->nErr = 1; + return p; + }else{ + jsonParseFree(p); + sqlite3_result_error(ctx, "malformed JSON", -1); + return 0; + } -/**************************************************************************** -** SQL functions used for testing and debugging -****************************************************************************/ +json_pfa_oom: + jsonParseFree(pFromCache); + jsonParseFree(p); + sqlite3_result_error_nomem(ctx); + return 0; +} -#ifdef SQLITE_DEBUG /* -** The json_parse(JSON) function returns a string which describes -** a parse of the JSON provided. Or it returns NULL if JSON is not -** well-formed. +** Make the return value of a JSON function either the raw JSONB blob +** or make it JSON text, depending on whether the JSON_BLOB flag is +** set on the function. */ -static void jsonParseFunc( +static void jsonReturnParse( sqlite3_context *ctx, - int argc, - sqlite3_value **argv + JsonParse *p ){ - JsonString s; /* Output string - not real JSON */ - JsonParse x; /* The parse */ - u32 i; - - assert( argc==1 ); - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - jsonParseFindParents(&x); - jsonInit(&s, ctx); - for(i=0; ioom ){ + sqlite3_result_error_nomem(ctx); + return; + } + flgs = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( flgs & JSON_BLOB ){ + if( p->nBlobAlloc>0 && !p->bReadOnly ){ + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_DYNAMIC); + p->nBlobAlloc = 0; }else{ - zType = jsonType[x.aNode[i].eType]; + sqlite3_result_blob(ctx, p->aBlob, p->nBlob, SQLITE_TRANSIENT); } - jsonPrintf(100, &s,"node %3u: %7s n=%-4d up=%-4d", - i, zType, x.aNode[i].n, x.aUp[i]); - assert( x.aNode[i].eU==0 || x.aNode[i].eU==1 ); - if( x.aNode[i].u.zJContent!=0 ){ - assert( x.aNode[i].eU==1 ); - jsonAppendRaw(&s, " ", 1); - jsonAppendRaw(&s, x.aNode[i].u.zJContent, x.aNode[i].n); - }else{ - assert( x.aNode[i].eU==0 ); + }else{ + JsonString s; + jsonStringInit(&s, ctx); + p->delta = 0; + jsonTranslateBlobToText(p, 0, &s); + jsonReturnString(&s, p, ctx); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } +} + +/**************************************************************************** +** SQL functions used for testing and debugging +****************************************************************************/ + +#if SQLITE_DEBUG +/* +** Decode JSONB bytes in aBlob[] starting at iStart through but not +** including iEnd. Indent the +** content by nIndent spaces. 
+*/ +static void jsonDebugPrintBlob( + JsonParse *pParse, /* JSON content */ + u32 iStart, /* Start rendering here */ + u32 iEnd, /* Do not render this byte or any byte after this one */ + int nIndent, /* Indent by this many spaces */ + sqlite3_str *pOut /* Generate output into this sqlite3_str object */ +){ + while( iStartaBlob[iStart] & 0x0f; + u32 savedNBlob = pParse->nBlob; + sqlite3_str_appendf(pOut, "%5d:%*s", iStart, nIndent, ""); + if( pParse->nBlobAlloc>pParse->nBlob ){ + pParse->nBlob = pParse->nBlobAlloc; + } + nn = n = jsonbPayloadSize(pParse, iStart, &sz); + if( nn==0 ) nn = 1; + if( sz>0 && xaBlob[iStart+i]); + } + if( n==0 ){ + sqlite3_str_appendf(pOut, " ERROR invalid node size\n"); + iStart = n==0 ? iStart+1 : iEnd; + continue; + } + pParse->nBlob = savedNBlob; + if( iStart+n+sz>iEnd ){ + iEnd = iStart+n+sz; + if( iEnd>pParse->nBlob ){ + if( pParse->nBlobAlloc>0 && iEnd>pParse->nBlobAlloc ){ + iEnd = pParse->nBlobAlloc; + }else{ + iEnd = pParse->nBlob; + } + } } - jsonAppendRaw(&s, "\n", 1); + sqlite3_str_appendall(pOut," <-- "); + switch( x ){ + case JSONB_NULL: sqlite3_str_appendall(pOut,"null"); break; + case JSONB_TRUE: sqlite3_str_appendall(pOut,"true"); break; + case JSONB_FALSE: sqlite3_str_appendall(pOut,"false"); break; + case JSONB_INT: sqlite3_str_appendall(pOut,"int"); break; + case JSONB_INT5: sqlite3_str_appendall(pOut,"int5"); break; + case JSONB_FLOAT: sqlite3_str_appendall(pOut,"float"); break; + case JSONB_FLOAT5: sqlite3_str_appendall(pOut,"float5"); break; + case JSONB_TEXT: sqlite3_str_appendall(pOut,"text"); break; + case JSONB_TEXTJ: sqlite3_str_appendall(pOut,"textj"); break; + case JSONB_TEXT5: sqlite3_str_appendall(pOut,"text5"); break; + case JSONB_TEXTRAW: sqlite3_str_appendall(pOut,"textraw"); break; + case JSONB_ARRAY: { + sqlite3_str_appendf(pOut,"array, %u bytes\n", sz); + jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + case JSONB_OBJECT: { + sqlite3_str_appendf(pOut, "object, %u bytes\n", sz); + jsonDebugPrintBlob(pParse, iStart+n, iStart+n+sz, nIndent+2, pOut); + showContent = 0; + break; + } + default: { + sqlite3_str_appendall(pOut, "ERROR: unknown node type\n"); + showContent = 0; + break; + } + } + if( showContent ){ + if( sz==0 && x<=JSONB_FALSE ){ + sqlite3_str_append(pOut, "\n", 1); + }else{ + u32 j; + sqlite3_str_appendall(pOut, ": \""); + for(j=iStart+n; jaBlob[j]; + if( c<0x20 || c>=0x7f ) c = '.'; + sqlite3_str_append(pOut, (char*)&c, 1); + } + sqlite3_str_append(pOut, "\"\n", 2); + } + } + iStart += n + sz; } - jsonParseReset(&x); - jsonResult(&s); } +static void jsonShowParse(JsonParse *pParse){ + sqlite3_str out; + char zBuf[1000]; + if( pParse==0 ){ + printf("NULL pointer\n"); + return; + }else{ + printf("nBlobAlloc = %u\n", pParse->nBlobAlloc); + printf("nBlob = %u\n", pParse->nBlob); + printf("delta = %d\n", pParse->delta); + if( pParse->nBlob==0 ) return; + printf("content (bytes 0..%u):\n", pParse->nBlob-1); + } + sqlite3StrAccumInit(&out, 0, zBuf, sizeof(zBuf), 1000000); + jsonDebugPrintBlob(pParse, 0, pParse->nBlob, 0, &out); + printf("%s", sqlite3_str_value(&out)); + sqlite3_str_reset(&out); +} +#endif /* SQLITE_DEBUG */ +#ifdef SQLITE_DEBUG /* -** The json_test1(JSON) function return true (1) if the input is JSON -** text generated by another json function. It returns (0) if the input -** is not known to be JSON. +** SQL function: json_parse(JSON) +** +** Parse JSON using jsonParseFuncArg(). 
Return text that is a +** human-readable dump of the binary JSONB for the input parameter. */ -static void jsonTest1Func( +static void jsonParseFunc( sqlite3_context *ctx, int argc, sqlite3_value **argv ){ - UNUSED_PARAMETER(argc); - sqlite3_result_int(ctx, sqlite3_value_subtype(argv[0])==JSON_SUBTYPE); + JsonParse *p; /* The parse */ + sqlite3_str out; + + assert( argc>=1 ); + sqlite3StrAccumInit(&out, 0, 0, 0, 1000000); + p = jsonParseFuncArg(ctx, argv[0], 0); + if( p==0 ) return; + if( argc==1 ){ + jsonDebugPrintBlob(p, 0, p->nBlob, 0, &out); + sqlite3_result_text64(ctx, out.zText, out.nChar, SQLITE_DYNAMIC, SQLITE_UTF8); + }else{ + jsonShowParse(p); + } + jsonParseFree(p); } #endif /* SQLITE_DEBUG */ @@ -202272,7 +212513,7 @@ static void jsonTest1Func( ****************************************************************************/ /* -** Implementation of the json_QUOTE(VALUE) function. Return a JSON value +** Implementation of the json_quote(VALUE) function. Return a JSON value ** corresponding to the SQL value input. Mostly this means putting ** double-quotes around strings and returning the unquoted string "null" ** when given a NULL input. @@ -202285,9 +212526,9 @@ static void jsonQuoteFunc( JsonString jx; UNUSED_PARAMETER(argc); - jsonInit(&jx, ctx); - jsonAppendValue(&jx, argv[0]); - jsonResult(&jx); + jsonStringInit(&jx, ctx); + jsonAppendSqlValue(&jx, argv[0]); + jsonReturnString(&jx, 0, 0); sqlite3_result_subtype(ctx, JSON_SUBTYPE); } @@ -202304,18 +212545,17 @@ static void jsonArrayFunc( int i; JsonString jx; - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '['); for(i=0; inNode ); if( argc==2 ){ const char *zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); + if( zPath==0 ){ + jsonParseFree(p); + return; + } + i = jsonLookupStep(p, 0, zPath[0]=='$' ? zPath+1 : "@", 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + eErr = 1; + i = 0; + } }else{ - pNode = p->aNode; - } - if( pNode==0 ){ - return; + i = 0; } - if( pNode->eType==JSON_ARRAY ){ - assert( (pNode->jnFlags & JNODE_APPEND)==0 ); - for(i=1; i<=pNode->n; n++){ - i += jsonNodeSize(&pNode[i]); - } + if( (p->aBlob[i] & 0x0f)==JSONB_ARRAY ){ + cnt = jsonbArrayCount(p, i); } - sqlite3_result_int64(ctx, n); + if( !eErr ) sqlite3_result_int64(ctx, cnt); + jsonParseFree(p); } -/* -** Bit values for the flags passed into jsonExtractFunc() or -** jsonSetFunc() via the user-data value. -*/ -#define JSON_JSON 0x01 /* Result is always JSON */ -#define JSON_SQL 0x02 /* Result is always SQL */ -#define JSON_ABPATH 0x03 /* Allow abbreviated JSON path specs */ -#define JSON_ISSET 0x04 /* json_set(), not json_insert() */ +/* True if the string is all digits */ +static int jsonAllDigits(const char *z, int n){ + int i; + for(i=0; i2 ){ + jsonAppendChar(&jx, '['); + } + for(i=1; i and ->> operators accept abbreviated PATH arguments. This - ** is mostly for compatibility with PostgreSQL, but also for - ** convenience. - ** - ** NUMBER ==> $[NUMBER] // PG compatible - ** LABEL ==> $.LABEL // PG compatible - ** [NUMBER] ==> $[NUMBER] // Not PG. 
Purely for convenience - */ - jsonInit(&jx, ctx); - if( sqlite3Isdigit(zPath[0]) ){ - jsonAppendRaw(&jx, "$[", 2); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendRaw(&jx, "]", 2); - }else{ - jsonAppendRaw(&jx, "$.", 1 + (zPath[0]!='[')); - jsonAppendRaw(&jx, zPath, (int)strlen(zPath)); - jsonAppendChar(&jx, 0); - } - pNode = jx.bErr ? 0 : jsonLookup(p, jx.zBuf, 0, ctx); - jsonReset(&jx); + const char *zPath = (const char*)sqlite3_value_text(argv[i]); + int nPath; + u32 j; + if( zPath==0 ) goto json_extract_error; + nPath = sqlite3Strlen30(zPath); + if( zPath[0]=='$' ){ + j = jsonLookupStep(p, 0, zPath+1, 0); + }else if( (flags & JSON_ABPATH) ){ + /* The -> and ->> operators accept abbreviated PATH arguments. This + ** is mostly for compatibility with PostgreSQL, but also for + ** convenience. + ** + ** NUMBER ==> $[NUMBER] // PG compatible + ** LABEL ==> $.LABEL // PG compatible + ** [NUMBER] ==> $[NUMBER] // Not PG. Purely for convenience + */ + jsonStringInit(&jx, ctx); + if( jsonAllDigits(zPath, nPath) ){ + jsonAppendRawNZ(&jx, "[", 1); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "]", 2); + }else if( jsonAllAlphanum(zPath, nPath) ){ + jsonAppendRawNZ(&jx, ".", 1); + jsonAppendRaw(&jx, zPath, nPath); + }else if( zPath[0]=='[' && nPath>=3 && zPath[nPath-1]==']' ){ + jsonAppendRaw(&jx, zPath, nPath); }else{ - pNode = jsonLookup(p, zPath, 0, ctx); + jsonAppendRawNZ(&jx, ".\"", 2); + jsonAppendRaw(&jx, zPath, nPath); + jsonAppendRawNZ(&jx, "\"", 1); } - if( pNode ){ + jsonStringTerminate(&jx); + j = jsonLookupStep(p, 0, jx.zBuf, 0); + jsonStringReset(&jx); + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; + } + if( jnBlob ){ + if( argc==2 ){ if( flags & JSON_JSON ){ - jsonReturnJson(pNode, ctx, 0); + jsonStringInit(&jx, ctx); + jsonTranslateBlobToText(p, j, &jx); + jsonReturnString(&jx, 0, 0); + jsonStringReset(&jx); + assert( (flags & JSON_BLOB)==0 ); + sqlite3_result_subtype(ctx, JSON_SUBTYPE); }else{ - jsonReturn(pNode, ctx, 0); - sqlite3_result_subtype(ctx, 0); + jsonReturnFromBlob(p, j, ctx, 0); + if( (flags & (JSON_SQL|JSON_BLOB))==0 + && (p->aBlob[j]&0x0f)>=JSONB_ARRAY + ){ + sqlite3_result_subtype(ctx, JSON_SUBTYPE); + } } + }else{ + jsonAppendSeparator(&jx); + jsonTranslateBlobToText(p, j, &jx); } - }else{ - pNode = jsonLookup(p, zPath, 0, ctx); - if( p->nErr==0 && pNode ) jsonReturn(pNode, ctx, 0); - } - }else{ - /* Two or more PATH arguments results in a JSON array with each - ** element of the array being the value selected by one of the PATHs */ - int i; - jsonInit(&jx, ctx); - jsonAppendChar(&jx, '['); - for(i=1; inErr ) break; - jsonAppendSeparator(&jx); - if( pNode ){ - jsonRenderNode(pNode, &jx, 0); + }else if( j==JSON_LOOKUP_NOTFOUND ){ + if( argc==2 ){ + goto json_extract_error; /* Return NULL if not found */ }else{ - jsonAppendRaw(&jx, "null", 4); + jsonAppendSeparator(&jx); + jsonAppendRawNZ(&jx, "null", 4); } + }else if( j==JSON_LOOKUP_ERROR ){ + sqlite3_result_error(ctx, "malformed JSON", -1); + goto json_extract_error; + }else{ + jsonBadPathError(ctx, zPath); + goto json_extract_error; } - if( i==argc ){ - jsonAppendChar(&jx, ']'); - jsonResult(&jx); + } + if( argc>2 ){ + jsonAppendChar(&jx, ']'); + jsonReturnString(&jx, 0, 0); + if( (flags & JSON_BLOB)==0 ){ sqlite3_result_subtype(ctx, JSON_SUBTYPE); } - jsonReset(&jx); } +json_extract_error: + jsonStringReset(&jx); + jsonParseFree(p); + return; } -/* This is the RFC 7396 MergePatch algorithm. 
-*/ -static JsonNode *jsonMergePatch( - JsonParse *pParse, /* The JSON parser that contains the TARGET */ - u32 iTarget, /* Node of the TARGET in pParse */ - JsonNode *pPatch /* The PATCH */ -){ - u32 i, j; - u32 iRoot; - JsonNode *pTarget; - if( pPatch->eType!=JSON_OBJECT ){ - return pPatch; - } - assert( iTargetnNode ); - pTarget = &pParse->aNode[iTarget]; - assert( (pPatch->jnFlags & JNODE_APPEND)==0 ); - if( pTarget->eType!=JSON_OBJECT ){ - jsonRemoveAllNulls(pPatch); - return pPatch; - } - iRoot = iTarget; - for(i=1; in; i += jsonNodeSize(&pPatch[i+1])+1){ - u32 nKey; - const char *zKey; - assert( pPatch[i].eType==JSON_STRING ); - assert( pPatch[i].jnFlags & JNODE_LABEL ); - assert( pPatch[i].eU==1 ); - nKey = pPatch[i].n; - zKey = pPatch[i].u.zJContent; - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); - for(j=1; jn; j += jsonNodeSize(&pTarget[j+1])+1 ){ - assert( pTarget[j].eType==JSON_STRING ); - assert( pTarget[j].jnFlags & JNODE_LABEL ); - assert( (pPatch[i].jnFlags & JNODE_RAW)==0 ); - if( pTarget[j].n==nKey && strncmp(pTarget[j].u.zJContent,zKey,nKey)==0 ){ - if( pTarget[j+1].jnFlags & (JNODE_REMOVE|JNODE_PATCH) ) break; - if( pPatch[i+1].eType==JSON_NULL ){ - pTarget[j+1].jnFlags |= JNODE_REMOVE; - }else{ - JsonNode *pNew = jsonMergePatch(pParse, iTarget+j+1, &pPatch[i+1]); - if( pNew==0 ) return 0; - pTarget = &pParse->aNode[iTarget]; - if( pNew!=&pTarget[j+1] ){ - assert( pTarget[j+1].eU==0 - || pTarget[j+1].eU==1 - || pTarget[j+1].eU==2 ); - testcase( pTarget[j+1].eU==1 ); - testcase( pTarget[j+1].eU==2 ); - VVA( pTarget[j+1].eU = 5 ); - pTarget[j+1].u.pPatch = pNew; - pTarget[j+1].jnFlags |= JNODE_PATCH; - } - } - break; +/* +** Return codes for jsonMergePatch() +*/ +#define JSON_MERGE_OK 0 /* Success */ +#define JSON_MERGE_BADTARGET 1 /* Malformed TARGET blob */ +#define JSON_MERGE_BADPATCH 2 /* Malformed PATCH blob */ +#define JSON_MERGE_OOM 3 /* Out-of-memory condition */ + +/* +** RFC-7396 MergePatch for two JSONB blobs. +** +** pTarget is the target. pPatch is the patch. The target is updated +** in place. The patch is read-only. 
+** +** The original RFC-7396 algorithm is this: +** +** define MergePatch(Target, Patch): +** if Patch is an Object: +** if Target is not an Object: +** Target = {} # Ignore the contents and set it to an empty Object +** for each Name/Value pair in Patch: +** if Value is null: +** if Name exists in Target: +** remove the Name/Value pair from Target +** else: +** Target[Name] = MergePatch(Target[Name], Value) +** return Target +** else: +** return Patch +** +** Here is an equivalent algorithm restructured to show the actual +** implementation: +** +** 01 define MergePatch(Target, Patch): +** 02 if Patch is not an Object: +** 03 return Patch +** 04 else: // if Patch is an Object +** 05 if Target is not an Object: +** 06 Target = {} +** 07 for each Name/Value pair in Patch: +** 08 if Name exists in Target: +** 09 if Value is null: +** 10 remove the Name/Value pair from Target +** 11 else +** 12 Target[name] = MergePatch(Target[Name], Value) +** 13 else if Value is not NULL: +** 14 if Value is not an Object: +** 15 Target[name] = Value +** 16 else: +** 17 Target[name] = MergePatch('{}',value) +** 18 return Target +** | +** ^---- Line numbers referenced in comments in the implementation +*/ +static int jsonMergePatch( + JsonParse *pTarget, /* The JSON parser that contains the TARGET */ + u32 iTarget, /* Index of TARGET in pTarget->aBlob[] */ + const JsonParse *pPatch, /* The PATCH */ + u32 iPatch /* Index of PATCH in pPatch->aBlob[] */ +){ + u8 x; /* Type of a single node */ + u32 n, sz=0; /* Return values from jsonbPayloadSize() */ + u32 iTCursor; /* Cursor position while scanning the target object */ + u32 iTStart; /* First label in the target object */ + u32 iTEndBE; /* Original first byte past end of target, before edit */ + u32 iTEnd; /* Current first byte past end of target */ + u8 eTLabel; /* Node type of the target label */ + u32 iTLabel = 0; /* Index of the label */ + u32 nTLabel = 0; /* Header size in bytes for the target label */ + u32 szTLabel = 0; /* Size of the target label payload */ + u32 iTValue = 0; /* Index of the target value */ + u32 nTValue = 0; /* Header size of the target value */ + u32 szTValue = 0; /* Payload size for the target value */ + + u32 iPCursor; /* Cursor position while scanning the patch */ + u32 iPEnd; /* First byte past the end of the patch */ + u8 ePLabel; /* Node type of the patch label */ + u32 iPLabel; /* Start of patch label */ + u32 nPLabel; /* Size of header on the patch label */ + u32 szPLabel; /* Payload size of the patch label */ + u32 iPValue; /* Start of patch value */ + u32 nPValue; /* Header size for the patch value */ + u32 szPValue; /* Payload size of the patch value */ + + assert( iTarget>=0 && iTargetnBlob ); + assert( iPatch>=0 && iPatchnBlob ); + x = pPatch->aBlob[iPatch] & 0x0f; + if( x!=JSONB_OBJECT ){ /* Algorithm line 02 */ + u32 szPatch; /* Total size of the patch, header+payload */ + u32 szTarget; /* Total size of the target, header+payload */ + n = jsonbPayloadSize(pPatch, iPatch, &sz); + szPatch = n+sz; + sz = 0; + n = jsonbPayloadSize(pTarget, iTarget, &sz); + szTarget = n+sz; + jsonBlobEdit(pTarget, iTarget, szTarget, pPatch->aBlob+iPatch, szPatch); + return pTarget->oom ? 
JSON_MERGE_OOM : JSON_MERGE_OK; /* Line 03 */ + } + x = pTarget->aBlob[iTarget] & 0x0f; + if( x!=JSONB_OBJECT ){ /* Algorithm line 05 */ + n = jsonbPayloadSize(pTarget, iTarget, &sz); + jsonBlobEdit(pTarget, iTarget+n, sz, 0, 0); + x = pTarget->aBlob[iTarget]; + pTarget->aBlob[iTarget] = (x & 0xf0) | JSONB_OBJECT; + } + n = jsonbPayloadSize(pPatch, iPatch, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADPATCH; + iPCursor = iPatch+n; + iPEnd = iPCursor+sz; + n = jsonbPayloadSize(pTarget, iTarget, &sz); + if( NEVER(n==0) ) return JSON_MERGE_BADTARGET; + iTStart = iTarget+n; + iTEndBE = iTStart+sz; + + while( iPCursoraBlob[iPCursor] & 0x0f; + if( ePLabelJSONB_TEXTRAW ){ + return JSON_MERGE_BADPATCH; + } + nPLabel = jsonbPayloadSize(pPatch, iPCursor, &szPLabel); + if( nPLabel==0 ) return JSON_MERGE_BADPATCH; + iPValue = iPCursor + nPLabel + szPLabel; + if( iPValue>=iPEnd ) return JSON_MERGE_BADPATCH; + nPValue = jsonbPayloadSize(pPatch, iPValue, &szPValue); + if( nPValue==0 ) return JSON_MERGE_BADPATCH; + iPCursor = iPValue + nPValue + szPValue; + if( iPCursor>iPEnd ) return JSON_MERGE_BADPATCH; + + iTCursor = iTStart; + iTEnd = iTEndBE + pTarget->delta; + while( iTCursoraBlob[iTCursor] & 0x0f; + if( eTLabelJSONB_TEXTRAW ){ + return JSON_MERGE_BADTARGET; + } + nTLabel = jsonbPayloadSize(pTarget, iTCursor, &szTLabel); + if( nTLabel==0 ) return JSON_MERGE_BADTARGET; + iTValue = iTLabel + nTLabel + szTLabel; + if( iTValue>=iTEnd ) return JSON_MERGE_BADTARGET; + nTValue = jsonbPayloadSize(pTarget, iTValue, &szTValue); + if( nTValue==0 ) return JSON_MERGE_BADTARGET; + if( iTValue + nTValue + szTValue > iTEnd ) return JSON_MERGE_BADTARGET; + isEqual = jsonLabelCompare( + (const char*)&pPatch->aBlob[iPLabel+nPLabel], + szPLabel, + (ePLabel==JSONB_TEXT || ePLabel==JSONB_TEXTRAW), + (const char*)&pTarget->aBlob[iTLabel+nTLabel], + szTLabel, + (eTLabel==JSONB_TEXT || eTLabel==JSONB_TEXTRAW)); + if( isEqual ) break; + iTCursor = iTValue + nTValue + szTValue; + } + x = pPatch->aBlob[iPValue] & 0x0f; + if( iTCursoroom) ) return JSON_MERGE_OOM; + }else{ + /* Algorithm line 12 */ + int rc, savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTValue, pPatch, iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; + } + }else if( x>0 ){ /* Algorithm line 13 */ + /* No match and patch value is not NULL */ + u32 szNew = szPLabel+nPLabel; + if( (pPatch->aBlob[iPValue] & 0x0f)!=JSONB_OBJECT ){ /* Line 14 */ + jsonBlobEdit(pTarget, iTEnd, 0, 0, szPValue+nPValue+szNew); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + memcpy(&pTarget->aBlob[iTEnd+szNew], + &pPatch->aBlob[iPValue], szPValue+nPValue); + }else{ + int rc, savedDelta; + jsonBlobEdit(pTarget, iTEnd, 0, 0, szNew+1); + if( pTarget->oom ) return JSON_MERGE_OOM; + memcpy(&pTarget->aBlob[iTEnd], &pPatch->aBlob[iPLabel], szNew); + pTarget->aBlob[iTEnd+szNew] = 0x00; + savedDelta = pTarget->delta; + pTarget->delta = 0; + rc = jsonMergePatch(pTarget, iTEnd+szNew,pPatch,iPValue); + if( rc ) return rc; + pTarget->delta += savedDelta; } } - if( j>=pTarget->n && pPatch[i+1].eType!=JSON_NULL ){ - int iStart, iPatch; - iStart = jsonParseAddNode(pParse, JSON_OBJECT, 2, 0); - jsonParseAddNode(pParse, JSON_STRING, nKey, zKey); - iPatch = jsonParseAddNode(pParse, JSON_TRUE, 0, 0); - if( pParse->oom ) return 0; - jsonRemoveAllNulls(pPatch); - pTarget = &pParse->aNode[iTarget]; - assert( pParse->aNode[iRoot].eU==0 || pParse->aNode[iRoot].eU==2 ); - testcase( 
pParse->aNode[iRoot].eU==2 ); - pParse->aNode[iRoot].jnFlags |= JNODE_APPEND; - VVA( pParse->aNode[iRoot].eU = 2 ); - pParse->aNode[iRoot].u.iAppend = iStart - iRoot; - iRoot = iStart; - assert( pParse->aNode[iPatch].eU==0 ); - VVA( pParse->aNode[iPatch].eU = 5 ); - pParse->aNode[iPatch].jnFlags |= JNODE_PATCH; - pParse->aNode[iPatch].u.pPatch = &pPatch[i+1]; - } } - return pTarget; + if( pTarget->delta ) jsonAfterEditSizeAdjust(pTarget, iTarget); + return pTarget->oom ? JSON_MERGE_OOM : JSON_MERGE_OK; } + /* ** Implementation of the json_mergepatch(JSON1,JSON2) function. Return a JSON ** object that is the result of running the RFC 7396 MergePatch() algorithm @@ -202553,25 +212953,27 @@ static void jsonPatchFunc( int argc, sqlite3_value **argv ){ - JsonParse x; /* The JSON that is being patched */ - JsonParse y; /* The patch */ - JsonNode *pResult; /* The result of the merge */ + JsonParse *pTarget; /* The TARGET */ + JsonParse *pPatch; /* The PATCH */ + int rc; /* Result code */ UNUSED_PARAMETER(argc); - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - if( jsonParse(&y, ctx, (const char*)sqlite3_value_text(argv[1])) ){ - jsonParseReset(&x); - return; - } - pResult = jsonMergePatch(&x, 0, y.aNode); - assert( pResult!=0 || x.oom ); - if( pResult ){ - jsonReturnJson(pResult, ctx, 0); - }else{ - sqlite3_result_error_nomem(ctx); + assert( argc==2 ); + pTarget = jsonParseFuncArg(ctx, argv[0], JSON_EDITABLE); + if( pTarget==0 ) return; + pPatch = jsonParseFuncArg(ctx, argv[1], 0); + if( pPatch ){ + rc = jsonMergePatch(pTarget, 0, pPatch, 0); + if( rc==JSON_MERGE_OK ){ + jsonReturnParse(ctx, pTarget); + }else if( rc==JSON_MERGE_OOM ){ + sqlite3_result_error_nomem(ctx); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + jsonParseFree(pPatch); } - jsonParseReset(&x); - jsonParseReset(&y); + jsonParseFree(pTarget); } @@ -202595,23 +212997,23 @@ static void jsonObjectFunc( "of arguments", -1); return; } - jsonInit(&jx, ctx); + jsonStringInit(&jx, ctx); jsonAppendChar(&jx, '{'); for(i=0; i1 ? 
JSON_EDITABLE : 0); + if( p==0 ) return; + for(i=1; ijnFlags |= JNODE_REMOVE; - } - if( (x.aNode[0].jnFlags & JNODE_REMOVE)==0 ){ - jsonReturnJson(x.aNode, ctx, 0); + if( zPath==0 ){ + goto json_remove_done; + } + if( zPath[0]!='$' ){ + goto json_remove_patherror; + } + if( zPath[1]==0 ){ + /* json_remove(j,'$') returns NULL */ + goto json_remove_done; + } + p->eEdit = JEDIT_DEL; + p->delta = 0; + rc = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(rc) ){ + if( rc==JSON_LOOKUP_NOTFOUND ){ + continue; /* No-op */ + }else if( rc==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + goto json_remove_done; + } } -remove_done: - jsonParseReset(&x); + jsonReturnParse(ctx, p); + jsonParseFree(p); + return; + +json_remove_patherror: + jsonBadPathError(ctx, zPath); + +json_remove_done: + jsonParseFree(p); + return; } /* @@ -202660,38 +213086,12 @@ static void jsonReplaceFunc( int argc, sqlite3_value **argv ){ - JsonParse x; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, "replace"); return; } - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - assert( x.nNode ); - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - pNode = jsonLookup(&x, zPath, 0, ctx); - if( x.nErr ) goto replace_err; - if( pNode ){ - assert( pNode->eU==0 || pNode->eU==1 || pNode->eU==4 ); - testcase( pNode->eU!=0 && pNode->eU!=1 ); - pNode->jnFlags |= (u8)JNODE_REPLACE; - VVA( pNode->eU = 4 ); - pNode->u.iReplace = i + 1; - } - } - if( x.aNode[0].jnFlags & JNODE_REPLACE ){ - assert( x.aNode[0].eU==4 ); - sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); - }else{ - jsonReturnJson(x.aNode, ctx, argv); - } -replace_err: - jsonParseReset(&x); + jsonInsertIntoBlob(ctx, argc, argv, JEDIT_REPL); } @@ -202712,45 +213112,16 @@ static void jsonSetFunc( int argc, sqlite3_value **argv ){ - JsonParse x; /* The parse */ - JsonNode *pNode; - const char *zPath; - u32 i; - int bApnd; - int bIsSet = sqlite3_user_data(ctx)!=0; + + int flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + int bIsSet = (flags&JSON_ISSET)!=0; if( argc<1 ) return; if( (argc&1)==0 ) { jsonWrongNumArgs(ctx, bIsSet ? "set" : "insert"); return; } - if( jsonParse(&x, ctx, (const char*)sqlite3_value_text(argv[0])) ) return; - assert( x.nNode ); - for(i=1; i<(u32)argc; i+=2){ - zPath = (const char*)sqlite3_value_text(argv[i]); - bApnd = 0; - pNode = jsonLookup(&x, zPath, &bApnd, ctx); - if( x.oom ){ - sqlite3_result_error_nomem(ctx); - goto jsonSetDone; - }else if( x.nErr ){ - goto jsonSetDone; - }else if( pNode && (bApnd || bIsSet) ){ - testcase( pNode->eU!=0 && pNode->eU!=1 ); - assert( pNode->eU!=3 && pNode->eU!=5 ); - VVA( pNode->eU = 4 ); - pNode->jnFlags |= (u8)JNODE_REPLACE; - pNode->u.iReplace = i + 1; - } - } - if( x.aNode[0].jnFlags & JNODE_REPLACE ){ - assert( x.aNode[0].eU==4 ); - sqlite3_result_value(ctx, argv[x.aNode[0].u.iReplace]); - }else{ - jsonReturnJson(x.aNode, ctx, argv); - } -jsonSetDone: - jsonParseReset(&x); + jsonInsertIntoBlob(ctx, argc, argv, bIsSet ? 
JEDIT_SET : JEDIT_INS); } /* @@ -202766,27 +213137,93 @@ static void jsonTypeFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - const char *zPath; - JsonNode *pNode; + const char *zPath = 0; + u32 i; - p = jsonParseCached(ctx, argv, ctx); + p = jsonParseFuncArg(ctx, argv[0], 0); if( p==0 ) return; if( argc==2 ){ zPath = (const char*)sqlite3_value_text(argv[1]); - pNode = jsonLookup(p, zPath, 0, ctx); + if( zPath==0 ) goto json_type_done; + if( zPath[0]!='$' ){ + jsonBadPathError(ctx, zPath); + goto json_type_done; + } + i = jsonLookupStep(p, 0, zPath+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + /* no-op */ + }else if( i==JSON_LOOKUP_PATHERROR ){ + jsonBadPathError(ctx, zPath); + }else{ + sqlite3_result_error(ctx, "malformed JSON", -1); + } + goto json_type_done; + } }else{ - pNode = p->aNode; - } - if( pNode ){ - sqlite3_result_text(ctx, jsonType[pNode->eType], -1, SQLITE_STATIC); + i = 0; } + sqlite3_result_text(ctx, jsonbType[p->aBlob[i]&0x0f], -1, SQLITE_STATIC); +json_type_done: + jsonParseFree(p); } /* ** json_valid(JSON) -** -** Return 1 if JSON is a well-formed JSON string according to RFC-7159. -** Return 0 otherwise. +** json_valid(JSON, FLAGS) +** +** Check the JSON argument to see if it is well-formed. The FLAGS argument +** encodes the various constraints on what is meant by "well-formed": +** +** 0x01 Canonical RFC-8259 JSON text +** 0x02 JSON text with optional JSON-5 extensions +** 0x04 Superficially appears to be JSONB +** 0x08 Strictly well-formed JSONB +** +** If the FLAGS argument is omitted, it defaults to 1. Useful values for +** FLAGS include: +** +** 1 Strict canonical JSON text +** 2 JSON text perhaps with JSON-5 extensions +** 4 Superficially appears to be JSONB +** 5 Canonical JSON text or superficial JSONB +** 6 JSON-5 text or superficial JSONB +** 8 Strict JSONB +** 9 Canonical JSON text or strict JSONB +** 10 JSON-5 text or strict JSONB +** +** Other flag combinations are redundant. For example, every canonical +** JSON text is also well-formed JSON-5 text, so FLAG values 2 and 3 +** are the same. Similarly, any input that passes a strict JSONB validation +** will also pass the superficial validation so 12 through 15 are the same +** as 8 through 11 respectively. +** +** This routine runs in linear time to validate text and when doing strict +** JSONB validation. Superficial JSONB validation is constant time, +** assuming the BLOB is already in memory. The performance advantage +** of superficial JSONB validation is why that option is provided. +** Application developers can choose to do fast superficial validation or +** slower strict validation, according to their specific needs. +** +** Only the lower four bits of the FLAGS argument are currently used. +** Higher bits are reserved for future expansion. To facilitate +** compatibility, the current implementation raises an error if any bit +** in FLAGS is set other than the lower four bits. +** +** The original circa 2015 implementation of the JSON routines in +** SQLite only supported canonical RFC-8259 JSON text and the json_valid() +** function only accepted one argument. That is why the default value +** for the FLAGS argument is 1, since FLAGS=1 causes this routine to only +** recognize canonical RFC-8259 JSON text as valid. The extra FLAGS +** argument was added when the JSON routines were extended to support +** JSON5-like extensions and binary JSONB stored in BLOBs. +** +** Return Values: +** +** * Raise an error if FLAGS is outside the range of 1 to 15. 
+** * Return NULL if the input is NULL +** * Return 1 if the input is well-formed. +** * Return 0 if the input is not well-formed. */ static void jsonValidFunc( sqlite3_context *ctx, @@ -202794,11 +213231,127 @@ static void jsonValidFunc( sqlite3_value **argv ){ JsonParse *p; /* The parse */ - UNUSED_PARAMETER(argc); - p = jsonParseCached(ctx, argv, 0); - sqlite3_result_int(ctx, p!=0); + u8 flags = 1; + u8 res = 0; + if( argc==2 ){ + i64 f = sqlite3_value_int64(argv[1]); + if( f<1 || f>15 ){ + sqlite3_result_error(ctx, "FLAGS parameter to json_valid() must be" + " between 1 and 15", -1); + return; + } + flags = f & 0x0f; + } + switch( sqlite3_value_type(argv[0]) ){ + case SQLITE_NULL: { +#ifdef SQLITE_LEGACY_JSON_VALID + /* Incorrect legacy behavior was to return FALSE for a NULL input */ + sqlite3_result_int(ctx, 0); +#endif + return; + } + case SQLITE_BLOB: { + if( jsonFuncArgMightBeBinary(argv[0]) ){ + if( flags & 0x04 ){ + /* Superficial checking only - accomplished by the + ** jsonFuncArgMightBeBinary() call above. */ + res = 1; + }else if( flags & 0x08 ){ + /* Strict checking. Check by translating BLOB->TEXT->BLOB. If + ** no errors occur, call that a "strict check". */ + JsonParse px; + u32 iErr; + memset(&px, 0, sizeof(px)); + px.aBlob = (u8*)sqlite3_value_blob(argv[0]); + px.nBlob = sqlite3_value_bytes(argv[0]); + iErr = jsonbValidityCheck(&px, 0, px.nBlob, 1); + res = iErr==0; + } + break; + } + /* Fall through into interpreting the input as text. See note + ** above at tag-20240123-a. */ + /* no break */ deliberate_fall_through + } + default: { + JsonParse px; + if( (flags & 0x3)==0 ) break; + memset(&px, 0, sizeof(px)); + + p = jsonParseFuncArg(ctx, argv[0], JSON_KEEPERROR); + if( p ){ + if( p->oom ){ + sqlite3_result_error_nomem(ctx); + }else if( p->nErr ){ + /* no-op */ + }else if( (flags & 0x02)!=0 || p->hasNonstd==0 ){ + res = 1; + } + jsonParseFree(p); + }else{ + sqlite3_result_error_nomem(ctx); + } + break; + } + } + sqlite3_result_int(ctx, res); } +/* +** json_error_position(JSON) +** +** If the argument is NULL, return NULL +** +** If the argument is BLOB, do a full validity check and return non-zero +** if the check fails. The return value is the approximate 1-based offset +** to the byte of the element that contains the first error. +** +** Otherwise interpret the argument is TEXT (even if it is numeric) and +** return the 1-based character position for where the parser first recognized +** that the input was not valid JSON, or return 0 if the input text looks +** ok. JSON-5 extensions are accepted. 
+*/ +static void jsonErrorFunc( + sqlite3_context *ctx, + int argc, + sqlite3_value **argv +){ + i64 iErrPos = 0; /* Error position to be returned */ + JsonParse s; + + assert( argc==1 ); + UNUSED_PARAMETER(argc); + memset(&s, 0, sizeof(s)); + s.db = sqlite3_context_db_handle(ctx); + if( jsonFuncArgMightBeBinary(argv[0]) ){ + s.aBlob = (u8*)sqlite3_value_blob(argv[0]); + s.nBlob = sqlite3_value_bytes(argv[0]); + iErrPos = (i64)jsonbValidityCheck(&s, 0, s.nBlob, 1); + }else{ + s.zJson = (char*)sqlite3_value_text(argv[0]); + if( s.zJson==0 ) return; /* NULL input or OOM */ + s.nJson = sqlite3_value_bytes(argv[0]); + if( jsonConvertTextToBlob(&s,0) ){ + if( s.oom ){ + iErrPos = -1; + }else{ + /* Convert byte-offset s.iErr into a character offset */ + u32 k; + assert( s.zJson!=0 ); /* Because s.oom is false */ + for(k=0; kzBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '['); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; - jsonAppendValue(pStr, argv[0]); + jsonAppendSqlValue(pStr, argv[0]); } } static void jsonArrayCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; pStr->pCtx = ctx; jsonAppendChar(pStr, ']'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, - pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free); + pStr->bStatic ? 
SQLITE_TRANSIENT : + sqlite3RCStrUnref); pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "[]", 2, SQLITE_STATIC); @@ -202879,7 +213443,7 @@ static void jsonGroupInverse( pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); #ifdef NEVER /* pStr is always non-NULL since jsonArrayStep() or jsonObjectStep() will - ** always have been called to initalize it */ + ** always have been called to initialize it */ if( NEVER(!pStr) ) return; #endif z = pStr->zBuf; @@ -202923,34 +213487,46 @@ static void jsonObjectStep( pStr = (JsonString*)sqlite3_aggregate_context(ctx, sizeof(*pStr)); if( pStr ){ if( pStr->zBuf==0 ){ - jsonInit(pStr, ctx); + jsonStringInit(pStr, ctx); jsonAppendChar(pStr, '{'); }else if( pStr->nUsed>1 ){ jsonAppendChar(pStr, ','); } pStr->pCtx = ctx; z = (const char*)sqlite3_value_text(argv[0]); - n = (u32)sqlite3_value_bytes(argv[0]); + n = sqlite3Strlen30(z); jsonAppendString(pStr, z, n); jsonAppendChar(pStr, ':'); - jsonAppendValue(pStr, argv[1]); + jsonAppendSqlValue(pStr, argv[1]); } } static void jsonObjectCompute(sqlite3_context *ctx, int isFinal){ JsonString *pStr; pStr = (JsonString*)sqlite3_aggregate_context(ctx, 0); if( pStr ){ + int flags; jsonAppendChar(pStr, '}'); - if( pStr->bErr ){ - if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx); - assert( pStr->bStatic ); + pStr->pCtx = ctx; + flags = SQLITE_PTR_TO_INT(sqlite3_user_data(ctx)); + if( pStr->eErr ){ + jsonReturnString(pStr, 0, 0); + return; + }else if( flags & JSON_BLOB ){ + jsonReturnStringAsBlob(pStr); + if( isFinal ){ + if( !pStr->bStatic ) sqlite3RCStrUnref(pStr->zBuf); + }else{ + jsonStringTrimOneChar(pStr); + } + return; }else if( isFinal ){ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, - pStr->bStatic ? SQLITE_TRANSIENT : sqlite3_free); + pStr->bStatic ? SQLITE_TRANSIENT : + sqlite3RCStrUnref); pStr->bStatic = 1; }else{ sqlite3_result_text(ctx, pStr->zBuf, (int)pStr->nUsed, SQLITE_TRANSIENT); - pStr->nUsed--; + jsonStringTrimOneChar(pStr); } }else{ sqlite3_result_text(ctx, "{}", 2, SQLITE_STATIC); @@ -202970,19 +213546,37 @@ static void jsonObjectFinal(sqlite3_context *ctx){ /**************************************************************************** ** The json_each virtual table ****************************************************************************/ +typedef struct JsonParent JsonParent; +struct JsonParent { + u32 iHead; /* Start of object or array */ + u32 iValue; /* Start of the value */ + u32 iEnd; /* First byte past the end */ + u32 nPath; /* Length of path */ + i64 iKey; /* Key for JSONB_ARRAY */ +}; + typedef struct JsonEachCursor JsonEachCursor; struct JsonEachCursor { sqlite3_vtab_cursor base; /* Base class - must be first */ u32 iRowid; /* The rowid */ - u32 iBegin; /* The first node of the scan */ - u32 i; /* Index in sParse.aNode[] of current row */ + u32 i; /* Index in sParse.aBlob[] of current row */ u32 iEnd; /* EOF when i equals or exceeds this value */ - u8 eType; /* Type of top-level element */ + u32 nRoot; /* Size of the root path in bytes */ + u8 eType; /* Type of the container for element i */ u8 bRecursive; /* True for json_tree(). 
False for json_each() */ - char *zJson; /* Input JSON */ - char *zRoot; /* Path by which to filter zJson */ + u32 nParent; /* Current nesting depth */ + u32 nParentAlloc; /* Space allocated for aParent[] */ + JsonParent *aParent; /* Parent elements of i */ + sqlite3 *db; /* Database connection */ + JsonString path; /* Current path */ JsonParse sParse; /* Parse of the input JSON */ }; +typedef struct JsonEachConnection JsonEachConnection; +struct JsonEachConnection { + sqlite3_vtab base; /* Base class - must be first */ + sqlite3 *db; /* Database connection */ +}; + /* Constructor for the json_each virtual table */ static int jsonEachConnect( @@ -202992,7 +213586,7 @@ static int jsonEachConnect( sqlite3_vtab **ppVtab, char **pzErr ){ - sqlite3_vtab *pNew; + JsonEachConnection *pNew; int rc; /* Column numbers */ @@ -203018,28 +213612,32 @@ static int jsonEachConnect( "CREATE TABLE x(key,value,type,atom,id,parent,fullkey,path," "json HIDDEN,root HIDDEN)"); if( rc==SQLITE_OK ){ - pNew = *ppVtab = sqlite3_malloc( sizeof(*pNew) ); + pNew = (JsonEachConnection*)sqlite3DbMallocZero(db, sizeof(*pNew)); + *ppVtab = (sqlite3_vtab*)pNew; if( pNew==0 ) return SQLITE_NOMEM; - memset(pNew, 0, sizeof(*pNew)); sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + pNew->db = db; } return rc; } /* destructor for json_each virtual table */ static int jsonEachDisconnect(sqlite3_vtab *pVtab){ - sqlite3_free(pVtab); + JsonEachConnection *p = (JsonEachConnection*)pVtab; + sqlite3DbFree(p->db, pVtab); return SQLITE_OK; } /* constructor for a JsonEachCursor object for json_each(). */ static int jsonEachOpenEach(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ + JsonEachConnection *pVtab = (JsonEachConnection*)p; JsonEachCursor *pCur; UNUSED_PARAMETER(p); - pCur = sqlite3_malloc( sizeof(*pCur) ); + pCur = sqlite3DbMallocZero(pVtab->db, sizeof(*pCur)); if( pCur==0 ) return SQLITE_NOMEM; - memset(pCur, 0, sizeof(*pCur)); + pCur->db = pVtab->db; + jsonStringZero(&pCur->path); *ppCursor = &pCur->base; return SQLITE_OK; } @@ -203057,22 +213655,24 @@ static int jsonEachOpenTree(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCursor){ /* Reset a JsonEachCursor back to its original state. Free any memory ** held. 
 */
 static void jsonEachCursorReset(JsonEachCursor *p){
-  sqlite3_free(p->zJson);
-  sqlite3_free(p->zRoot);
   jsonParseReset(&p->sParse);
+  jsonStringReset(&p->path);
+  sqlite3DbFree(p->db, p->aParent);
   p->iRowid = 0;
   p->i = 0;
+  p->aParent = 0;
+  p->nParent = 0;
+  p->nParentAlloc = 0;
   p->iEnd = 0;
   p->eType = 0;
-  p->zJson = 0;
-  p->zRoot = 0;
 }

 /* Destructor for a jsonEachCursor object */
 static int jsonEachClose(sqlite3_vtab_cursor *cur){
   JsonEachCursor *p = (JsonEachCursor*)cur;
   jsonEachCursorReset(p);
-  sqlite3_free(cur);
+
+  sqlite3DbFree(p->db, cur);
   return SQLITE_OK;
 }

@@ -203083,198 +213683,233 @@ static int jsonEachEof(sqlite3_vtab_cursor *cur){
   return p->i >= p->iEnd;
 }

-/* Advance the cursor to the next element for json_tree() */
-static int jsonEachNext(sqlite3_vtab_cursor *cur){
-  JsonEachCursor *p = (JsonEachCursor*)cur;
-  if( p->bRecursive ){
-    if( p->sParse.aNode[p->i].jnFlags & JNODE_LABEL ) p->i++;
-    p->i++;
-    p->iRowid++;
-    if( p->i<p->iEnd ){
-      u32 iUp = p->sParse.aUp[p->i];
-      JsonNode *pUp = &p->sParse.aNode[iUp];
-      p->eType = pUp->eType;
-      if( pUp->eType==JSON_ARRAY ){
-        assert( pUp->eU==0 || pUp->eU==3 );
-        testcase( pUp->eU==3 );
-        VVA( pUp->eU = 3 );
-        if( iUp==p->i-1 ){
-          pUp->u.iKey = 0;
-        }else{
-          pUp->u.iKey++;
+/*
+** If the cursor is currently pointing at the label of a object entry,
+** then return the index of the value.  For all other cases, return the
+** current pointer position, which is the value.
+*/
+static int jsonSkipLabel(JsonEachCursor *p){
+  if( p->eType==JSONB_OBJECT ){
+    u32 sz = 0;
+    u32 n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+    return p->i + n + sz;
+  }else{
+    return p->i;
+  }
+}
+
+/*
+** Append the path name for the current element.
+*/
+static void jsonAppendPathName(JsonEachCursor *p){
+  assert( p->nParent>0 );
+  assert( p->eType==JSONB_ARRAY || p->eType==JSONB_OBJECT );
+  if( p->eType==JSONB_ARRAY ){
+    jsonPrintf(30, &p->path, "[%lld]", p->aParent[p->nParent-1].iKey);
+  }else{
+    u32 n, sz = 0, k, i;
+    const char *z;
+    int needQuote = 0;
+    n = jsonbPayloadSize(&p->sParse, p->i, &sz);
+    k = p->i + n;
+    z = (const char*)&p->sParse.aBlob[k];
+    if( sz==0 || !sqlite3Isalpha(z[0]) ){
+      needQuote = 1;
+    }else{
+      for(i=0; i<sz; i++){
+        if( !sqlite3Isalnum(z[i]) ){
+          needQuote = 1;
+          break;
+        }
+      }
+    }
-  }else{
-    switch( p->eType ){
-      case JSON_ARRAY: {
-        p->i += jsonNodeSize(&p->sParse.aNode[p->i]);
-        p->iRowid++;
-        break;
-      }
-      case JSON_OBJECT: {
-        p->i += 1 + jsonNodeSize(&p->sParse.aNode[p->i+1]);
-        p->iRowid++;
-        break;
-      }
-      default: {
-        p->i = p->iEnd;
-        break;
-      }
+    if( needQuote ){
+      jsonPrintf(sz+4,&p->path,".\"%.*s\"", sz, z);
+    }else{
+      jsonPrintf(sz+2,&p->path,".%.*s", sz, z);
     }
   }
-  return SQLITE_OK;
 }

-/* Append an object label to the JSON Path being constructed
-** in pStr.
-*/ -static void jsonAppendObjectPathElement( - JsonString *pStr, - JsonNode *pNode -){ - int jj, nn; - const char *z; - assert( pNode->eType==JSON_STRING ); - assert( pNode->jnFlags & JNODE_LABEL ); - assert( pNode->eU==1 ); - z = pNode->u.zJContent; - nn = pNode->n; - assert( nn>=2 ); - assert( z[0]=='"' ); - assert( z[nn-1]=='"' ); - if( nn>2 && sqlite3Isalpha(z[1]) ){ - for(jj=2; jjbRecursive ){ + u8 x; + u8 levelChange = 0; + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + x = p->sParse.aBlob[i] & 0x0f; + n = jsonbPayloadSize(&p->sParse, i, &sz); + if( x==JSONB_OBJECT || x==JSONB_ARRAY ){ + JsonParent *pParent; + if( p->nParent>=p->nParentAlloc ){ + JsonParent *pNew; + u64 nNew; + nNew = p->nParentAlloc*2 + 3; + pNew = sqlite3DbRealloc(p->db, p->aParent, sizeof(JsonParent)*nNew); + if( pNew==0 ) return SQLITE_NOMEM; + p->nParentAlloc = (u32)nNew; + p->aParent = pNew; + } + levelChange = 1; + pParent = &p->aParent[p->nParent]; + pParent->iHead = p->i; + pParent->iValue = i; + pParent->iEnd = i + n + sz; + pParent->iKey = -1; + pParent->nPath = (u32)p->path.nUsed; + if( p->eType && p->nParent ){ + jsonAppendPathName(p); + if( p->path.eErr ) rc = SQLITE_NOMEM; + } + p->nParent++; + p->i = i + n; + }else{ + p->i = i + n + sz; + } + while( p->nParent>0 && p->i >= p->aParent[p->nParent-1].iEnd ){ + p->nParent--; + p->path.nUsed = p->aParent[p->nParent].nPath; + levelChange = 1; + } + if( levelChange ){ + if( p->nParent>0 ){ + JsonParent *pParent = &p->aParent[p->nParent-1]; + u32 iVal = pParent->iValue; + p->eType = p->sParse.aBlob[iVal] & 0x0f; + }else{ + p->eType = 0; + } } + }else{ + u32 n, sz = 0; + u32 i = jsonSkipLabel(p); + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->i = i + n + sz; + } + if( p->eType==JSONB_ARRAY && p->nParent ){ + p->aParent[p->nParent-1].iKey++; } - jsonPrintf(nn+2, pStr, ".%.*s", nn, z); + p->iRowid++; + return rc; } -/* Append the name of the path for element i to pStr +/* Length of the path for rowid==0 in bRecursive mode. */ -static void jsonEachComputePath( - JsonEachCursor *p, /* The cursor */ - JsonString *pStr, /* Write the path here */ - u32 i /* Path to this element */ -){ - JsonNode *pNode, *pUp; - u32 iUp; - if( i==0 ){ - jsonAppendChar(pStr, '$'); - return; - } - iUp = p->sParse.aUp[i]; - jsonEachComputePath(p, pStr, iUp); - pNode = &p->sParse.aNode[i]; - pUp = &p->sParse.aNode[iUp]; - if( pUp->eType==JSON_ARRAY ){ - assert( pUp->eU==3 || (pUp->eU==0 && pUp->u.iKey==0) ); - testcase( pUp->eU==0 ); - jsonPrintf(30, pStr, "[%d]", pUp->u.iKey); - }else{ - assert( pUp->eType==JSON_OBJECT ); - if( (pNode->jnFlags & JNODE_LABEL)==0 ) pNode--; - jsonAppendObjectPathElement(pStr, pNode); +static int jsonEachPathLength(JsonEachCursor *p){ + u32 n = p->path.nUsed; + char *z = p->path.zBuf; + if( p->iRowid==0 && p->bRecursive && n>=2 ){ + while( n>1 ){ + n--; + if( z[n]=='[' || z[n]=='.' 
){
+        u32 x, sz = 0;
+        char cSaved = z[n];
+        z[n] = 0;
+        assert( p->sParse.eEdit==0 );
+        x = jsonLookupStep(&p->sParse, 0, z+1, 0);
+        z[n] = cSaved;
+        if( JSON_LOOKUP_ISERROR(x) ) continue;
+        if( x + jsonbPayloadSize(&p->sParse, x, &sz) == p->i ) break;
+      }
+    }
+  }
+  return n;
 }

 /* Return the value of a column */
 static int jsonEachColumn(
   sqlite3_vtab_cursor *cur,   /* The cursor */
   sqlite3_context *ctx,       /* First argument to sqlite3_result_...() */
-  int i                       /* Which column to return */
+  int iColumn                 /* Which column to return */
 ){
   JsonEachCursor *p = (JsonEachCursor*)cur;
-  JsonNode *pThis = &p->sParse.aNode[p->i];
-  switch( i ){
+  switch( iColumn ){
     case JEACH_KEY: {
-      if( p->i==0 ) break;
-      if( p->eType==JSON_OBJECT ){
-        jsonReturn(pThis, ctx, 0);
-      }else if( p->eType==JSON_ARRAY ){
-        u32 iKey;
-        if( p->bRecursive ){
-          if( p->iRowid==0 ) break;
-          assert( p->sParse.aNode[p->sParse.aUp[p->i]].eU==3 );
-          iKey = p->sParse.aNode[p->sParse.aUp[p->i]].u.iKey;
+      if( p->nParent==0 ){
+        u32 n, j;
+        if( p->nRoot==1 ) break;
+        j = jsonEachPathLength(p);
+        n = p->nRoot - j;
+        if( n==0 ){
+          break;
+        }else if( p->path.zBuf[j]=='[' ){
+          i64 x;
+          sqlite3Atoi64(&p->path.zBuf[j+1], &x, n-1, SQLITE_UTF8);
+          sqlite3_result_int64(ctx, x);
+        }else if( p->path.zBuf[j+1]=='"' ){
+          sqlite3_result_text(ctx, &p->path.zBuf[j+2], n-3, SQLITE_TRANSIENT);
         }else{
-          iKey = p->iRowid;
+          sqlite3_result_text(ctx, &p->path.zBuf[j+1], n-1, SQLITE_TRANSIENT);
         }
-        sqlite3_result_int64(ctx, (sqlite3_int64)iKey);
+        break;
+      }
+      if( p->eType==JSONB_OBJECT ){
+        jsonReturnFromBlob(&p->sParse, p->i, ctx, 1);
+      }else{
+        assert( p->eType==JSONB_ARRAY );
+        sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iKey);
       }
       break;
     }
     case JEACH_VALUE: {
-      if( pThis->jnFlags & JNODE_LABEL ) pThis++;
-      jsonReturn(pThis, ctx, 0);
+      u32 i = jsonSkipLabel(p);
+      jsonReturnFromBlob(&p->sParse, i, ctx, 1);
+      if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY ){
+        sqlite3_result_subtype(ctx, JSON_SUBTYPE);
+      }
       break;
     }
     case JEACH_TYPE: {
-      if( pThis->jnFlags & JNODE_LABEL ) pThis++;
-      sqlite3_result_text(ctx, jsonType[pThis->eType], -1, SQLITE_STATIC);
+      u32 i = jsonSkipLabel(p);
+      u8 eType = p->sParse.aBlob[i] & 0x0f;
+      sqlite3_result_text(ctx, jsonbType[eType], -1, SQLITE_STATIC);
       break;
     }
     case JEACH_ATOM: {
-      if( pThis->jnFlags & JNODE_LABEL ) pThis++;
-      if( pThis->eType>=JSON_ARRAY ) break;
-      jsonReturn(pThis, ctx, 0);
+      u32 i = jsonSkipLabel(p);
+      if( (p->sParse.aBlob[i] & 0x0f)<JSONB_ARRAY ){
+        jsonReturnFromBlob(&p->sParse, i, ctx, 1);
+      }
       break;
    }
    case JEACH_ID: {
-      sqlite3_result_int64(ctx,
-          (sqlite3_int64)p->i + ((pThis->jnFlags & JNODE_LABEL)!=0));
+      sqlite3_result_int64(ctx, (sqlite3_int64)p->i);
      break;
    }
    case JEACH_PARENT: {
-      if( p->i>p->iBegin && p->bRecursive ){
-        sqlite3_result_int64(ctx, (sqlite3_int64)p->sParse.aUp[p->i]);
+      if( p->nParent>0 && p->bRecursive ){
+        sqlite3_result_int64(ctx, p->aParent[p->nParent-1].iHead);
      }
      break;
    }
    case JEACH_FULLKEY: {
-      JsonString x;
-      jsonInit(&x, ctx);
-      if( p->bRecursive ){
-        jsonEachComputePath(p, &x, p->i);
-      }else{
-        if( p->zRoot ){
-          jsonAppendRaw(&x, p->zRoot, (int)strlen(p->zRoot));
-        }else{
-          jsonAppendChar(&x, '$');
-        }
-        if( p->eType==JSON_ARRAY ){
-          jsonPrintf(30, &x, "[%d]", p->iRowid);
-        }else if( p->eType==JSON_OBJECT ){
-          jsonAppendObjectPathElement(&x, pThis);
-        }
-      }
-      jsonResult(&x);
+      u64 nBase = p->path.nUsed;
+      if( p->nParent ) jsonAppendPathName(p);
+      sqlite3_result_text64(ctx, p->path.zBuf, p->path.nUsed,
+                            SQLITE_TRANSIENT, SQLITE_UTF8);
+      p->path.nUsed = nBase;
      break;
    }
    case JEACH_PATH: {
-      if( p->bRecursive ){
-        JsonString
x; - jsonInit(&x, ctx); - jsonEachComputePath(p, &x, p->sParse.aUp[p->i]); - jsonResult(&x); - break; - } - /* For json_each() path and root are the same so fall through - ** into the root case */ - /* no break */ deliberate_fall_through + u32 n = jsonEachPathLength(p); + sqlite3_result_text64(ctx, p->path.zBuf, n, + SQLITE_TRANSIENT, SQLITE_UTF8); + break; } default: { - const char *zRoot = p->zRoot; - if( zRoot==0 ) zRoot = "$"; - sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC); + sqlite3_result_text(ctx, p->path.zBuf, p->nRoot, SQLITE_STATIC); break; } case JEACH_JSON: { - assert( i==JEACH_JSON ); - sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_STATIC); + if( p->sParse.zJson==0 ){ + sqlite3_result_blob(ctx, p->sParse.aBlob, p->sParse.nBlob, + SQLITE_TRANSIENT); + }else{ + sqlite3_result_text(ctx, p->sParse.zJson, -1, SQLITE_TRANSIENT); + } break; } } @@ -203324,6 +213959,13 @@ static int jsonEachBestIndex( idxMask |= iMask; } } + if( pIdxInfo->nOrderBy>0 + && pIdxInfo->aOrderBy[0].iColumn<0 + && pIdxInfo->aOrderBy[0].desc==0 + ){ + pIdxInfo->orderByConsumed = 1; + } + if( (unusableMask & ~idxMask)!=0 ){ /* If there are any unusable constraints on JSON or ROOT, then reject ** this entire plan */ @@ -203358,78 +214000,97 @@ static int jsonEachFilter( int argc, sqlite3_value **argv ){ JsonEachCursor *p = (JsonEachCursor*)cur; - const char *z; const char *zRoot = 0; - sqlite3_int64 n; + u32 i, n, sz; UNUSED_PARAMETER(idxStr); UNUSED_PARAMETER(argc); jsonEachCursorReset(p); if( idxNum==0 ) return SQLITE_OK; - z = (const char*)sqlite3_value_text(argv[0]); - if( z==0 ) return SQLITE_OK; - n = sqlite3_value_bytes(argv[0]); - p->zJson = sqlite3_malloc64( n+1 ); - if( p->zJson==0 ) return SQLITE_NOMEM; - memcpy(p->zJson, z, (size_t)n+1); - if( jsonParse(&p->sParse, 0, p->zJson) ){ - int rc = SQLITE_NOMEM; - if( p->sParse.oom==0 ){ - sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); - if( cur->pVtab->zErrMsg ) rc = SQLITE_ERROR; + memset(&p->sParse, 0, sizeof(p->sParse)); + p->sParse.nJPRef = 1; + p->sParse.db = p->db; + if( jsonFuncArgMightBeBinary(argv[0]) ){ + p->sParse.nBlob = sqlite3_value_bytes(argv[0]); + p->sParse.aBlob = (u8*)sqlite3_value_blob(argv[0]); + }else{ + p->sParse.zJson = (char*)sqlite3_value_text(argv[0]); + p->sParse.nJson = sqlite3_value_bytes(argv[0]); + if( p->sParse.zJson==0 ){ + p->i = p->iEnd = 0; + return SQLITE_OK; } - jsonEachCursorReset(p); - return rc; - }else if( p->bRecursive && jsonParseFindParents(&p->sParse) ){ - jsonEachCursorReset(p); - return SQLITE_NOMEM; - }else{ - JsonNode *pNode = 0; - if( idxNum==3 ){ - const char *zErr = 0; - zRoot = (const char*)sqlite3_value_text(argv[1]); - if( zRoot==0 ) return SQLITE_OK; - n = sqlite3_value_bytes(argv[1]); - p->zRoot = sqlite3_malloc64( n+1 ); - if( p->zRoot==0 ) return SQLITE_NOMEM; - memcpy(p->zRoot, zRoot, (size_t)n+1); - if( zRoot[0]!='$' ){ - zErr = zRoot; - }else{ - pNode = jsonLookupStep(&p->sParse, 0, p->zRoot+1, 0, &zErr); + if( jsonConvertTextToBlob(&p->sParse, 0) ){ + if( p->sParse.oom ){ + return SQLITE_NOMEM; } - if( zErr ){ + goto json_each_malformed_input; + } + } + if( idxNum==3 ){ + zRoot = (const char*)sqlite3_value_text(argv[1]); + if( zRoot==0 ) return SQLITE_OK; + if( zRoot[0]!='$' ){ + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? 
SQLITE_ERROR : SQLITE_NOMEM; + } + p->nRoot = sqlite3Strlen30(zRoot); + if( zRoot[1]==0 ){ + i = p->i = 0; + p->eType = 0; + }else{ + i = jsonLookupStep(&p->sParse, 0, zRoot+1, 0); + if( JSON_LOOKUP_ISERROR(i) ){ + if( i==JSON_LOOKUP_NOTFOUND ){ + p->i = 0; + p->eType = 0; + p->iEnd = 0; + return SQLITE_OK; + } sqlite3_free(cur->pVtab->zErrMsg); - cur->pVtab->zErrMsg = jsonPathSyntaxError(zErr); + cur->pVtab->zErrMsg = jsonBadPathError(0, zRoot); jsonEachCursorReset(p); return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; - }else if( pNode==0 ){ - return SQLITE_OK; } - }else{ - pNode = p->sParse.aNode; - } - p->iBegin = p->i = (int)(pNode - p->sParse.aNode); - p->eType = pNode->eType; - if( p->eType>=JSON_ARRAY ){ - assert( pNode->eU==0 ); - VVA( pNode->eU = 3 ); - pNode->u.iKey = 0; - p->iEnd = p->i + pNode->n + 1; - if( p->bRecursive ){ - p->eType = p->sParse.aNode[p->sParse.aUp[p->i]].eType; - if( p->i>0 && (p->sParse.aNode[p->i-1].jnFlags & JNODE_LABEL)!=0 ){ - p->i--; - } + if( p->sParse.iLabel ){ + p->i = p->sParse.iLabel; + p->eType = JSONB_OBJECT; }else{ - p->i++; - } - }else{ - p->iEnd = p->i+1; - } + p->i = i; + p->eType = JSONB_ARRAY; + } + } + jsonAppendRaw(&p->path, zRoot, p->nRoot); + }else{ + i = p->i = 0; + p->eType = 0; + p->nRoot = 1; + jsonAppendRaw(&p->path, "$", 1); + } + p->nParent = 0; + n = jsonbPayloadSize(&p->sParse, i, &sz); + p->iEnd = i+n+sz; + if( (p->sParse.aBlob[i] & 0x0f)>=JSONB_ARRAY && !p->bRecursive ){ + p->i = i + n; + p->eType = p->sParse.aBlob[i] & 0x0f; + p->aParent = sqlite3DbMallocZero(p->db, sizeof(JsonParent)); + if( p->aParent==0 ) return SQLITE_NOMEM; + p->nParent = 1; + p->nParentAlloc = 1; + p->aParent[0].iKey = 0; + p->aParent[0].iEnd = p->iEnd; + p->aParent[0].iHead = p->i; + p->aParent[0].iValue = i; } return SQLITE_OK; + +json_each_malformed_input: + sqlite3_free(cur->pVtab->zErrMsg); + cur->pVtab->zErrMsg = sqlite3_mprintf("malformed JSON"); + jsonEachCursorReset(p); + return cur->pVtab->zErrMsg ? SQLITE_ERROR : SQLITE_NOMEM; } /* The methods of the json_each virtual table */ @@ -203457,7 +214118,8 @@ static sqlite3_module jsonEachModule = { 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; /* The methods of the json_tree virtual table. 
*/ @@ -203485,7 +214147,8 @@ static sqlite3_module jsonTreeModule = { 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; #endif /* SQLITE_OMIT_VIRTUALTABLE */ #endif /* !defined(SQLITE_OMIT_JSON) */ @@ -203496,33 +214159,57 @@ static sqlite3_module jsonTreeModule = { SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){ #ifndef SQLITE_OMIT_JSON static FuncDef aJsonFunc[] = { - JFUNCTION(json, 1, 0, jsonRemoveFunc), - JFUNCTION(json_array, -1, 0, jsonArrayFunc), - JFUNCTION(json_array_length, 1, 0, jsonArrayLengthFunc), - JFUNCTION(json_array_length, 2, 0, jsonArrayLengthFunc), - JFUNCTION(json_extract, -1, 0, jsonExtractFunc), - JFUNCTION(->, 2, JSON_JSON, jsonExtractFunc), - JFUNCTION(->>, 2, JSON_SQL, jsonExtractFunc), - JFUNCTION(json_insert, -1, 0, jsonSetFunc), - JFUNCTION(json_object, -1, 0, jsonObjectFunc), - JFUNCTION(json_patch, 2, 0, jsonPatchFunc), - JFUNCTION(json_quote, 1, 0, jsonQuoteFunc), - JFUNCTION(json_remove, -1, 0, jsonRemoveFunc), - JFUNCTION(json_replace, -1, 0, jsonReplaceFunc), - JFUNCTION(json_set, -1, JSON_ISSET, jsonSetFunc), - JFUNCTION(json_type, 1, 0, jsonTypeFunc), - JFUNCTION(json_type, 2, 0, jsonTypeFunc), - JFUNCTION(json_valid, 1, 0, jsonValidFunc), + /* sqlite3_result_subtype() ----, ,--- sqlite3_value_subtype() */ + /* | | */ + /* Uses cache ------, | | ,---- Returns JSONB */ + /* | | | | */ + /* Number of arguments ---, | | | | ,--- Flags */ + /* | | | | | | */ + JFUNCTION(json, 1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb, 1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_array, -1,0,1, 1,0,0, jsonArrayFunc), + JFUNCTION(jsonb_array, -1,0,1, 1,1,0, jsonArrayFunc), + JFUNCTION(json_array_length, 1,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_array_length, 2,1,0, 0,0,0, jsonArrayLengthFunc), + JFUNCTION(json_error_position,1,1,0, 0,0,0, jsonErrorFunc), + JFUNCTION(json_extract, -1,1,1, 0,0,0, jsonExtractFunc), + JFUNCTION(jsonb_extract, -1,1,0, 0,1,0, jsonExtractFunc), + JFUNCTION(->, 2,1,1, 0,0,JSON_JSON, jsonExtractFunc), + JFUNCTION(->>, 2,1,0, 0,0,JSON_SQL, jsonExtractFunc), + JFUNCTION(json_insert, -1,1,1, 1,0,0, jsonSetFunc), + JFUNCTION(jsonb_insert, -1,1,0, 1,1,0, jsonSetFunc), + JFUNCTION(json_object, -1,0,1, 1,0,0, jsonObjectFunc), + JFUNCTION(jsonb_object, -1,0,1, 1,1,0, jsonObjectFunc), + JFUNCTION(json_patch, 2,1,1, 0,0,0, jsonPatchFunc), + JFUNCTION(jsonb_patch, 2,1,0, 0,1,0, jsonPatchFunc), + JFUNCTION(json_quote, 1,0,1, 1,0,0, jsonQuoteFunc), + JFUNCTION(json_remove, -1,1,1, 0,0,0, jsonRemoveFunc), + JFUNCTION(jsonb_remove, -1,1,0, 0,1,0, jsonRemoveFunc), + JFUNCTION(json_replace, -1,1,1, 1,0,0, jsonReplaceFunc), + JFUNCTION(jsonb_replace, -1,1,0, 1,1,0, jsonReplaceFunc), + JFUNCTION(json_set, -1,1,1, 1,0,JSON_ISSET, jsonSetFunc), + JFUNCTION(jsonb_set, -1,1,0, 1,1,JSON_ISSET, jsonSetFunc), + JFUNCTION(json_type, 1,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_type, 2,1,0, 0,0,0, jsonTypeFunc), + JFUNCTION(json_valid, 1,1,0, 0,0,0, jsonValidFunc), + JFUNCTION(json_valid, 2,1,0, 0,0,0, jsonValidFunc), #if SQLITE_DEBUG - JFUNCTION(json_parse, 1, 0, jsonParseFunc), - JFUNCTION(json_test1, 1, 0, jsonTest1Func), + JFUNCTION(json_parse, 1,1,0, 0,0,0, jsonParseFunc), #endif WAGGREGATE(json_group_array, 1, 0, 0, jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS), + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_array, 1, 
JSON_BLOB, 0, + jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), WAGGREGATE(json_group_object, 2, 0, 0, jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, - SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS) + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC), + WAGGREGATE(jsonb_group_object,2, JSON_BLOB, 0, + jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse, + SQLITE_SUBTYPE|SQLITE_RESULT_SUBTYPE|SQLITE_UTF8| + SQLITE_DETERMINISTIC) }; sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc)); #endif @@ -203649,6 +214336,11 @@ typedef unsigned int u32; #endif #endif /* !defined(SQLITE_AMALGAMATION) */ +/* Macro to check for 4-byte alignment. Only used inside of assert() */ +#ifdef SQLITE_DEBUG +# define FOUR_BYTE_ALIGNED(X) ((((char*)(X) - (char*)0) & 3)==0) +#endif + /* #include */ /* #include */ /* #include */ @@ -203714,6 +214406,7 @@ struct Rtree { int iDepth; /* Current depth of the r-tree structure */ char *zDb; /* Name of database containing r-tree table */ char *zName; /* Name of r-tree table */ + char *zNodeName; /* Name of the %_node table */ u32 nBusy; /* Current number of users of this structure */ i64 nRowEst; /* Estimated number of rows in this table */ u32 nCursor; /* Number of open cursors */ @@ -203726,7 +214419,6 @@ struct Rtree { ** headed by the node (leaf nodes have RtreeNode.iNode==0). */ RtreeNode *pDeleted; - int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */ /* Blob I/O on xxx_node */ sqlite3_blob *pNodeBlob; @@ -204023,17 +214715,23 @@ struct RtreeMatchArg { ** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined ** at run-time. */ -#ifndef SQLITE_BYTEORDER -#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \ - defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ - defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ - defined(__arm__) -# define SQLITE_BYTEORDER 1234 -#elif defined(sparc) || defined(__ppc__) -# define SQLITE_BYTEORDER 4321 -#else -# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */ -#endif +#ifndef SQLITE_BYTEORDER /* Replicate changes at tag-20230904a */ +# if defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_BIG_ENDIAN__ +# define SQLITE_BYTEORDER 4321 +# elif defined(__BYTE_ORDER__) && __BYTE_ORDER__==__ORDER_LITTLE_ENDIAN__ +# define SQLITE_BYTEORDER 1234 +# elif defined(__BIG_ENDIAN__) && __BIG_ENDIAN__==1 +# define SQLITE_BYTEORDER 4321 +# elif defined(i386) || defined(__i386__) || defined(_M_IX86) || \ + defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \ + defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \ + defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64) +# define SQLITE_BYTEORDER 1234 +# elif defined(sparc) || defined(__ARMEB__) || defined(__AARCH64EB__) +# define SQLITE_BYTEORDER 4321 +# else +# define SQLITE_BYTEORDER 0 +# endif #endif @@ -204054,7 +214752,7 @@ static int readInt16(u8 *p){ return (p[0]<<8) + p[1]; } static void readCoord(u8 *p, RtreeCoord *pCoord){ - assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(p) ); #if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300 pCoord->u = _byteswap_ulong(*(u32*)p); #elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000 @@ -204108,7 +214806,7 @@ static void writeInt16(u8 *p, int i){ } static int writeCoord(u8 *p, RtreeCoord *pCoord){ u32 i; - assert( ((((char*)p) - 
(char*)0)&3)==0 ); /* p is always 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(p) ); assert( sizeof(RtreeCoord)==4 ); assert( sizeof(u32)==4 ); #if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000 @@ -204236,11 +214934,9 @@ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){ ** Clear the Rtree.pNodeBlob object */ static void nodeBlobReset(Rtree *pRtree){ - if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){ - sqlite3_blob *pBlob = pRtree->pNodeBlob; - pRtree->pNodeBlob = 0; - sqlite3_blob_close(pBlob); - } + sqlite3_blob *pBlob = pRtree->pNodeBlob; + pRtree->pNodeBlob = 0; + sqlite3_blob_close(pBlob); } /* @@ -204259,7 +214955,7 @@ static int nodeAcquire( ** increase its reference count and return it. */ if( (pNode = nodeHashLookup(pRtree, iNode))!=0 ){ - if( pParent && pParent!=pNode->pParent ){ + if( pParent && ALWAYS(pParent!=pNode->pParent) ){ RTREE_IS_CORRUPT(pRtree); return SQLITE_CORRUPT_VTAB; } @@ -204279,14 +214975,11 @@ static int nodeAcquire( } } if( pRtree->pNodeBlob==0 ){ - char *zTab = sqlite3_mprintf("%s_node", pRtree->zName); - if( zTab==0 ) return SQLITE_NOMEM; - rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0, + rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, pRtree->zNodeName, + "data", iNode, 0, &pRtree->pNodeBlob); - sqlite3_free(zTab); } if( rc ){ - nodeBlobReset(pRtree); *ppNode = 0; /* If unable to open an sqlite3_blob on the desired row, that can only ** be because the shadow tables hold erroneous data. */ @@ -204346,6 +215039,7 @@ static int nodeAcquire( } *ppNode = pNode; }else{ + nodeBlobReset(pRtree); if( pNode ){ pRtree->nNodeRef--; sqlite3_free(pNode); @@ -204490,6 +215184,7 @@ static void nodeGetCoord( int iCoord, /* Which coordinate to extract */ RtreeCoord *pCoord /* OUT: Space to write result to */ ){ + assert( iCellzData[12 + pRtree->nBytesPerCell*iCell + 4*iCoord], pCoord); } @@ -204679,7 +215374,9 @@ static int rtreeClose(sqlite3_vtab_cursor *cur){ sqlite3_finalize(pCsr->pReadAux); sqlite3_free(pCsr); pRtree->nCursor--; - nodeBlobReset(pRtree); + if( pRtree->nCursor==0 && pRtree->inWrTrans==0 ){ + nodeBlobReset(pRtree); + } return SQLITE_OK; } @@ -204836,7 +215533,7 @@ static void rtreeNonleafConstraint( assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE || p->op==RTREE_FALSE ); - assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(pCellData) ); switch( p->op ){ case RTREE_TRUE: return; /* Always satisfied */ case RTREE_FALSE: break; /* Never satisfied */ @@ -204889,7 +215586,7 @@ static void rtreeLeafConstraint( || p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE || p->op==RTREE_FALSE ); pCellData += 8 + p->iCoord*4; - assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */ + assert( FOUR_BYTE_ALIGNED(pCellData) ); RTREE_DECODE_COORD(eInt, pCellData, xN); switch( p->op ){ case RTREE_TRUE: return; /* Always satisfied */ @@ -205264,7 +215961,11 @@ static int rtreeRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *pRowid){ int rc = SQLITE_OK; RtreeNode *pNode = rtreeNodeOfFirstSearchPoint(pCsr, &rc); if( rc==SQLITE_OK && ALWAYS(p) ){ - *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); + if( p->iCell>=NCELL(pNode) ){ + rc = SQLITE_ABORT; + }else{ + *pRowid = nodeGetRowid(RTREE_OF_CURSOR(pCsr), pNode, p->iCell); + } } return rc; } @@ -205282,6 +215983,7 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){ if( rc ) return rc; 
   if( NEVER(p==0) ) return SQLITE_OK;
+  if( p->iCell>=NCELL(pNode) ) return SQLITE_ABORT;
   if( i==0 ){
     sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell));
   }else if( i<=pRtree->nDim2 ){
@@ -205459,7 +216161,20 @@ static int rtreeFilter(
       p->pInfo->nCoord = pRtree->nDim2;
       p->pInfo->anQueue = pCsr->anQueue;
       p->pInfo->mxLevel = pRtree->iDepth + 1;
-    }else if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){
+    }else if( eType==SQLITE_INTEGER ){
+      sqlite3_int64 iVal = sqlite3_value_int64(argv[ii]);
+#ifdef SQLITE_RTREE_INT_ONLY
+      p->u.rValue = iVal;
+#else
+      p->u.rValue = (double)iVal;
+      if( iVal>=((sqlite3_int64)1)<<48
+       || iVal<=-(((sqlite3_int64)1)<<48)
+      ){
+        if( p->op==RTREE_LT ) p->op = RTREE_LE;
+        if( p->op==RTREE_GT ) p->op = RTREE_GE;
+      }
+#endif
+    }else if( eType==SQLITE_FLOAT ){
 #ifdef SQLITE_RTREE_INT_ONLY
       p->u.rValue = sqlite3_value_int64(argv[ii]);
 #else
@@ -205590,11 +216305,12 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
       || p->op==SQLITE_INDEX_CONSTRAINT_MATCH)
     ){
       u8 op;
+      u8 doOmit = 1;
       switch( p->op ){
-        case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; break;
-        case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; break;
+        case SQLITE_INDEX_CONSTRAINT_EQ: op = RTREE_EQ; doOmit = 0; break;
+        case SQLITE_INDEX_CONSTRAINT_GT: op = RTREE_GT; doOmit = 0; break;
         case SQLITE_INDEX_CONSTRAINT_LE: op = RTREE_LE; break;
-        case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; break;
+        case SQLITE_INDEX_CONSTRAINT_LT: op = RTREE_LT; doOmit = 0; break;
         case SQLITE_INDEX_CONSTRAINT_GE: op = RTREE_GE; break;
         case SQLITE_INDEX_CONSTRAINT_MATCH: op = RTREE_MATCH; break;
         default: op = 0; break;
@@ -205603,15 +216319,19 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
         zIdxStr[iIdx++] = op;
         zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
         pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
-        pIdxInfo->aConstraintUsage[ii].omit = 1;
+        pIdxInfo->aConstraintUsage[ii].omit = doOmit;
       }
     }
   }
   pIdxInfo->idxNum = 2;
   pIdxInfo->needToFreeIdxStr = 1;
-  if( iIdx>0 && 0==(pIdxInfo->idxStr = sqlite3_mprintf("%s", zIdxStr)) ){
-    return SQLITE_NOMEM;
+  if( iIdx>0 ){
+    pIdxInfo->idxStr = sqlite3_malloc( iIdx+1 );
+    if( pIdxInfo->idxStr==0 ){
+      return SQLITE_NOMEM;
+    }
+    memcpy(pIdxInfo->idxStr, zIdxStr, iIdx+1);
   }

   nRow = pRtree->nRowEst >> (iIdx/2);
@@ -205690,31 +216410,22 @@ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
 */
 static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
   int ii;
-  int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
-  for(ii=0; ii<pRtree->nDim2; ii+=2){
-    RtreeCoord *a1 = &p1->aCoord[ii];
-    RtreeCoord *a2 = &p2->aCoord[ii];
-    if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
-     || ( isInt && (a2[0].i<a1[0].i || a2[1].i>a1[1].i))
-    ){
-      return 0;
+  if( pRtree->eCoordType==RTREE_COORD_INT32 ){
+    for(ii=0; ii<pRtree->nDim2; ii+=2){
+      RtreeCoord *a1 = &p1->aCoord[ii];
+      RtreeCoord *a2 = &p2->aCoord[ii];
+      if( a2[0].i<a1[0].i || a2[1].i>a1[1].i ) return 0;
+    }
+  }else{
+    for(ii=0; ii<pRtree->nDim2; ii+=2){
+      RtreeCoord *a1 = &p1->aCoord[ii];
+      RtreeCoord *a2 = &p2->aCoord[ii];
+      if( a2[0].f<a1[0].f || a2[1].f>a1[1].f ) return 0;
    }
  }
  return 1;
 }

-/*
-** Return the amount cell p would grow by if it were unioned with pCell.
-*/ -static RtreeDValue cellGrowth(Rtree *pRtree, RtreeCell *p, RtreeCell *pCell){ - RtreeDValue area; - RtreeCell cell; - memcpy(&cell, p, sizeof(RtreeCell)); - area = cellArea(pRtree, &cell); - cellUnion(pRtree, &cell, pCell); - return (cellArea(pRtree, &cell)-area); -} - static RtreeDValue cellOverlap( Rtree *pRtree, RtreeCell *p, @@ -205761,38 +216472,52 @@ static int ChooseLeaf( for(ii=0; rc==SQLITE_OK && ii<(pRtree->iDepth-iHeight); ii++){ int iCell; sqlite3_int64 iBest = 0; - + int bFound = 0; RtreeDValue fMinGrowth = RTREE_ZERO; RtreeDValue fMinArea = RTREE_ZERO; - int nCell = NCELL(pNode); - RtreeCell cell; RtreeNode *pChild = 0; - RtreeCell *aCell = 0; - - /* Select the child node which will be enlarged the least if pCell - ** is inserted into it. Resolve ties by choosing the entry with - ** the smallest area. + /* First check to see if there is are any cells in pNode that completely + ** contains pCell. If two or more cells in pNode completely contain pCell + ** then pick the smallest. */ for(iCell=0; iCell1 ){ - int iLeft = 0; - int iRight = 0; - - int nLeft = nIdx/2; - int nRight = nIdx-nLeft; - int *aLeft = aIdx; - int *aRight = &aIdx[nLeft]; - - SortByDistance(aLeft, nLeft, aDistance, aSpare); - SortByDistance(aRight, nRight, aDistance, aSpare); - - memcpy(aSpare, aLeft, sizeof(int)*nLeft); - aLeft = aSpare; - - while( iLeftnDim; iDim++){ - aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2]); - aCenterCoord[iDim] += DCOORD(aCell[ii].aCoord[iDim*2+1]); - } - } - for(iDim=0; iDimnDim; iDim++){ - aCenterCoord[iDim] = (aCenterCoord[iDim]/(nCell*(RtreeDValue)2)); - } - - for(ii=0; iinDim; iDim++){ - RtreeDValue coord = (DCOORD(aCell[ii].aCoord[iDim*2+1]) - - DCOORD(aCell[ii].aCoord[iDim*2])); - aDistance[ii] += (coord-aCenterCoord[iDim])*(coord-aCenterCoord[iDim]); - } - } - - SortByDistance(aOrder, nCell, aDistance, aSpare); - nodeZero(pRtree, pNode); - - for(ii=0; rc==SQLITE_OK && ii<(nCell-(RTREE_MINCELLS(pRtree)+1)); ii++){ - RtreeCell *p = &aCell[aOrder[ii]]; - nodeInsertCell(pRtree, pNode, p); - if( p->iRowid==pCell->iRowid ){ - if( iHeight==0 ){ - rc = rowidWrite(pRtree, p->iRowid, pNode->iNode); - }else{ - rc = parentWrite(pRtree, p->iRowid, pNode->iNode); - } - } - } - if( rc==SQLITE_OK ){ - rc = fixBoundingBox(pRtree, pNode); - } - for(; rc==SQLITE_OK && iiiNode currently contains - ** the height of the sub-tree headed by the cell. - */ - RtreeNode *pInsert; - RtreeCell *p = &aCell[aOrder[ii]]; - rc = ChooseLeaf(pRtree, p, iHeight, &pInsert); - if( rc==SQLITE_OK ){ - int rc2; - rc = rtreeInsertCell(pRtree, pInsert, p, iHeight); - rc2 = nodeRelease(pRtree, pInsert); - if( rc==SQLITE_OK ){ - rc = rc2; - } - } - } - - sqlite3_free(aCell); - return rc; -} - /* ** Insert cell pCell into node pNode. Node pNode is the head of a ** subtree iHeight high (leaf nodes have iHeight==0). @@ -206541,12 +217094,7 @@ static int rtreeInsertCell( } } if( nodeInsertCell(pRtree, pNode, pCell) ){ - if( iHeight<=pRtree->iReinsertHeight || pNode->iNode==1){ - rc = SplitNode(pRtree, pNode, pCell, iHeight); - }else{ - pRtree->iReinsertHeight = iHeight; - rc = Reinsert(pRtree, pNode, pCell, iHeight); - } + rc = SplitNode(pRtree, pNode, pCell, iHeight); }else{ rc = AdjustTree(pRtree, pNode, pCell); if( ALWAYS(rc==SQLITE_OK) ){ @@ -206788,7 +217336,7 @@ static int rtreeUpdate( rtreeReference(pRtree); assert(nData>=1); - cell.iRowid = 0; /* Used only to suppress a compiler warning */ + memset(&cell, 0, sizeof(cell)); /* Constraint handling. 
A write operation on an r-tree table may return ** SQLITE_CONSTRAINT for two reasons: @@ -206889,7 +217437,6 @@ static int rtreeUpdate( } if( rc==SQLITE_OK ){ int rc2; - pRtree->iReinsertHeight = -1; rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){ @@ -206918,8 +217465,7 @@ static int rtreeUpdate( */ static int rtreeBeginTransaction(sqlite3_vtab *pVtab){ Rtree *pRtree = (Rtree *)pVtab; - assert( pRtree->inWrTrans==0 ); - pRtree->inWrTrans++; + pRtree->inWrTrans = 1; return SQLITE_OK; } @@ -206933,6 +217479,9 @@ static int rtreeEndTransaction(sqlite3_vtab *pVtab){ nodeBlobReset(pRtree); return SQLITE_OK; } +static int rtreeRollback(sqlite3_vtab *pVtab){ + return rtreeEndTransaction(pVtab); +} /* ** The xRename method for rtree module virtual tables. @@ -207030,8 +217579,11 @@ static int rtreeShadowName(const char *zName){ return 0; } +/* Forward declaration */ +static int rtreeIntegrity(sqlite3_vtab*, const char*, const char*, int, char**); + static sqlite3_module rtreeModule = { - 3, /* iVersion */ + 4, /* iVersion */ rtreeCreate, /* xCreate - create a table */ rtreeConnect, /* xConnect - connect to an existing table */ rtreeBestIndex, /* xBestIndex - Determine search strategy */ @@ -207048,13 +217600,14 @@ static sqlite3_module rtreeModule = { rtreeBeginTransaction, /* xBegin - begin transaction */ rtreeEndTransaction, /* xSync - sync transaction */ rtreeEndTransaction, /* xCommit - commit transaction */ - rtreeEndTransaction, /* xRollback - rollback transaction */ + rtreeRollback, /* xRollback - rollback transaction */ 0, /* xFindFunction - function overloading */ rtreeRename, /* xRename - rename the table */ rtreeSavepoint, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - rtreeShadowName /* xShadowName */ + rtreeShadowName, /* xShadowName */ + rtreeIntegrity /* xIntegrity */ }; static int rtreeSqlInit( @@ -207147,7 +217700,7 @@ static int rtreeSqlInit( } sqlite3_free(zSql); } - if( pRtree->nAux ){ + if( pRtree->nAux && rc!=SQLITE_NOMEM ){ pRtree->zReadAuxSql = sqlite3_mprintf( "SELECT * FROM \"%w\".\"%w_rowid\" WHERE rowid=?1", zDb, zPrefix); @@ -207310,22 +217863,27 @@ static int rtreeInit( } sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + /* Allocate the sqlite3_vtab structure */ nDb = (int)strlen(argv[1]); nName = (int)strlen(argv[2]); - pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2); + pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8); if( !pRtree ){ return SQLITE_NOMEM; } - memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); + memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; + pRtree->zNodeName = &pRtree->zName[nName+1]; pRtree->eCoordType = (u8)eCoordType; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); + memcpy(pRtree->zNodeName, argv[2], nName); + memcpy(&pRtree->zNodeName[nName], "_node", 6); /* Create/Connect to the underlying relational database schema. If @@ -207822,7 +218380,6 @@ static int rtreeCheckTable( ){ RtreeCheck check; /* Common context for various routines */ sqlite3_stmt *pStmt = 0; /* Used to find column count of rtree table */ - int bEnd = 0; /* True if transaction should be closed */ int nAux = 0; /* Number of extra columns. 
*/ /* Initialize the context object */ @@ -207831,24 +218388,14 @@ static int rtreeCheckTable( check.zDb = zDb; check.zTab = zTab; - /* If there is not already an open transaction, open one now. This is - ** to ensure that the queries run as part of this integrity-check operate - ** on a consistent snapshot. */ - if( sqlite3_get_autocommit(db) ){ - check.rc = sqlite3_exec(db, "BEGIN", 0, 0, 0); - bEnd = 1; - } - /* Find the number of auxiliary columns */ - if( check.rc==SQLITE_OK ){ - pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); - if( pStmt ){ - nAux = sqlite3_column_count(pStmt) - 2; - sqlite3_finalize(pStmt); - }else - if( check.rc!=SQLITE_NOMEM ){ - check.rc = SQLITE_OK; - } + pStmt = rtreeCheckPrepare(&check, "SELECT * FROM %Q.'%q_rowid'", zDb, zTab); + if( pStmt ){ + nAux = sqlite3_column_count(pStmt) - 2; + sqlite3_finalize(pStmt); + }else + if( check.rc!=SQLITE_NOMEM ){ + check.rc = SQLITE_OK; } /* Find number of dimensions in the rtree table. */ @@ -207879,15 +218426,35 @@ static int rtreeCheckTable( sqlite3_finalize(check.aCheckMapping[0]); sqlite3_finalize(check.aCheckMapping[1]); - /* If one was opened, close the transaction */ - if( bEnd ){ - int rc = sqlite3_exec(db, "END", 0, 0, 0); - if( check.rc==SQLITE_OK ) check.rc = rc; - } *pzReport = check.zReport; return check.rc; } +/* +** Implementation of the xIntegrity method for Rtree. +*/ +static int rtreeIntegrity( + sqlite3_vtab *pVtab, /* The virtual table to check */ + const char *zSchema, /* Schema in which the virtual table lives */ + const char *zName, /* Name of the virtual table */ + int isQuick, /* True for a quick_check */ + char **pzErr /* Write results here */ +){ + Rtree *pRtree = (Rtree*)pVtab; + int rc; + assert( pzErr!=0 && *pzErr==0 ); + UNUSED_PARAMETER(zSchema); + UNUSED_PARAMETER(zName); + UNUSED_PARAMETER(isQuick); + rc = rtreeCheckTable(pRtree->db, pRtree->zDb, pRtree->zName, pzErr); + if( rc==SQLITE_OK && *pzErr ){ + *pzErr = sqlite3_mprintf("In RTree %s.%s:\n%z", + pRtree->zDb, pRtree->zName, *pzErr); + if( (*pzErr)==0 ) rc = SQLITE_NOMEM; + } + return rc; +} + /* ** Usage: ** @@ -208261,7 +218828,7 @@ static GeoPoly *geopolyFuncParam( int nByte; testcase( pCtx==0 ); if( sqlite3_value_type(pVal)==SQLITE_BLOB - && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord)) + && (nByte = sqlite3_value_bytes(pVal))>=(int)(4+6*sizeof(GeoCoord)) ){ const unsigned char *a = sqlite3_value_blob(pVal); int nVertex; @@ -208319,6 +218886,7 @@ static void geopolyBlobFunc( sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3_result_blob(context, p->hdr, 4+8*p->nVertex, SQLITE_TRANSIENT); @@ -208338,6 +218906,7 @@ static void geopolyJsonFunc( sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3 *db = sqlite3_context_db_handle(context); sqlite3_str *x = sqlite3_str_new(db); @@ -208419,6 +218988,7 @@ static void geopolyXformFunc( double F = sqlite3_value_double(argv[6]); GeoCoord x1, y1, x0, y0; int ii; + (void)argc; if( p ){ for(ii=0; iinVertex; ii++){ x0 = GeoX(p,ii); @@ -208469,6 +219039,7 @@ static void geopolyAreaFunc( sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ sqlite3_result_double(context, geopolyArea(p)); sqlite3_free(p); @@ -208494,6 +219065,7 @@ static void geopolyCcwFunc( sqlite3_value **argv ){ GeoPoly *p = geopolyFuncParam(context, argv[0], 0); + (void)argc; if( p ){ if( geopolyArea(p)<0.0 ){ int ii, jj; @@ 
-208548,6 +219120,7 @@ static void geopolyRegularFunc( int n = sqlite3_value_int(argv[3]); int i; GeoPoly *p; + (void)argc; if( n<3 || r<=0.0 ) return; if( n>1000 ) n = 1000; @@ -208657,6 +219230,7 @@ static void geopolyBBoxFunc( sqlite3_value **argv ){ GeoPoly *p = geopolyBBox(context, argv[0], 0, 0); + (void)argc; if( p ){ sqlite3_result_blob(context, p->hdr, 4+8*p->nVertex, SQLITE_TRANSIENT); @@ -208684,6 +219258,7 @@ static void geopolyBBoxStep( ){ RtreeCoord a[4]; int rc = SQLITE_OK; + (void)argc; (void)geopolyBBox(context, argv[0], a, &rc); if( rc==SQLITE_OK ){ GeoBBox *pBBox; @@ -208772,6 +219347,8 @@ static void geopolyContainsPointFunc( int v = 0; int cnt = 0; int ii; + (void)argc; + if( p1==0 ) return; for(ii=0; iinVertex-1; ii++){ v = pointBeneathLine(x0,y0,GeoX(p1,ii), GeoY(p1,ii), @@ -208811,6 +219388,7 @@ static void geopolyWithinFunc( ){ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0); GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0); + (void)argc; if( p1 && p2 ){ int x = geopolyOverlap(p1, p2); if( x<0 ){ @@ -209141,6 +219719,7 @@ static void geopolyOverlapFunc( ){ GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0); GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0); + (void)argc; if( p1 && p2 ){ int x = geopolyOverlap(p1, p2); if( x<0 ){ @@ -209161,8 +219740,12 @@ static void geopolyDebugFunc( int argc, sqlite3_value **argv ){ + (void)context; + (void)argc; #ifdef GEOPOLY_ENABLE_DEBUG geo_debug = sqlite3_value_int(argv[0]); +#else + (void)argv; #endif } @@ -209190,26 +219773,31 @@ static int geopolyInit( sqlite3_str *pSql; char *zSql; int ii; + (void)pAux; sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); /* Allocate the sqlite3_vtab structure */ nDb = strlen(argv[1]); nName = strlen(argv[2]); - pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName+2); + pRtree = (Rtree *)sqlite3_malloc64(sizeof(Rtree)+nDb+nName*2+8); if( !pRtree ){ return SQLITE_NOMEM; } - memset(pRtree, 0, sizeof(Rtree)+nDb+nName+2); + memset(pRtree, 0, sizeof(Rtree)+nDb+nName*2+8); pRtree->nBusy = 1; pRtree->base.pModule = &rtreeModule; pRtree->zDb = (char *)&pRtree[1]; pRtree->zName = &pRtree->zDb[nDb+1]; + pRtree->zNodeName = &pRtree->zName[nName+1]; pRtree->eCoordType = RTREE_COORD_REAL32; pRtree->nDim = 2; pRtree->nDim2 = 4; memcpy(pRtree->zDb, argv[1], nDb); memcpy(pRtree->zName, argv[2], nName); + memcpy(pRtree->zNodeName, argv[2], nName); + memcpy(&pRtree->zNodeName[nName], "_node", 6); /* Create/Connect to the underlying relational database schema. 
If @@ -209306,6 +219894,7 @@ static int geopolyFilter( RtreeNode *pRoot = 0; int rc = SQLITE_OK; int iCell = 0; + (void)idxStr; rtreeReference(pRtree); @@ -209432,6 +220021,7 @@ static int geopolyBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ int iRowidTerm = -1; int iFuncTerm = -1; int idxNum = 0; + (void)tab; for(ii=0; iinConstraint; ii++){ struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii]; @@ -209621,7 +220211,6 @@ static int geopolyUpdate( } if( rc==SQLITE_OK ){ int rc2; - pRtree->iReinsertHeight = -1; rc = rtreeInsertCell(pRtree, pLeaf, &cell, 0); rc2 = nodeRelease(pRtree, pLeaf); if( rc==SQLITE_OK ){ @@ -209678,6 +220267,8 @@ static int geopolyFindFunction( void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), void **ppArg ){ + (void)pVtab; + (void)nArg; if( sqlite3_stricmp(zName, "geopoly_overlap")==0 ){ *pxFunc = geopolyOverlapFunc; *ppArg = 0; @@ -209716,7 +220307,8 @@ static sqlite3_module geopolyModule = { rtreeSavepoint, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - rtreeShadowName /* xShadowName */ + rtreeShadowName, /* xShadowName */ + rtreeIntegrity /* xIntegrity */ }; static int sqlite3_geopoly_init(sqlite3 *db){ @@ -209747,7 +220339,7 @@ static int sqlite3_geopoly_init(sqlite3 *db){ } aAgg[] = { { geopolyBBoxStep, geopolyBBoxFinal, "geopoly_group_bbox" }, }; - int i; + unsigned int i; for(i=0; i naming scheme. +** tables or views named using the data_ naming scheme. ** ** Instead of the plain data_ naming scheme, RBU database tables ** may also be named data_, where is any sequence @@ -210981,7 +221573,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule( ** ** If the target database table is a virtual table or a table that has no ** PRIMARY KEY declaration, the data_% table must also contain a column -** named "rbu_rowid". This column is mapped to the tables implicit primary +** named "rbu_rowid". This column is mapped to the table's implicit primary ** key column - "rowid". Virtual tables for which the "rowid" column does ** not function like a primary key value cannot be updated using RBU. For ** example, if the target db contains either of the following: @@ -211414,6 +222006,34 @@ SQLITE_API void sqlite3rbu_bp_progress(sqlite3rbu *pRbu, int *pnOne, int*pnTwo); SQLITE_API int sqlite3rbu_state(sqlite3rbu *pRbu); +/* +** As part of applying an RBU update or performing an RBU vacuum operation, +** the system must at one point move the *-oal file to the equivalent *-wal +** path. Normally, it does this by invoking POSIX function rename(2) directly. +** Except on WINCE platforms, where it uses win32 API MoveFileW(). This +** function may be used to register a callback that the RBU module will invoke +** instead of one of these APIs. +** +** If a callback is registered with an RBU handle, it invokes it instead +** of rename(2) when it needs to move a file within the file-system. The +** first argument passed to the xRename() callback is a copy of the second +** argument (pArg) passed to this function. The second is the full path +** to the file to move and the third the full path to which it should be +** moved. The callback function should return SQLITE_OK to indicate +** success. If an error occurs, it should return an SQLite error code. +** In this case the RBU operation will be abandoned and the error returned +** to the RBU user. +** +** Passing a NULL pointer in place of the xRename argument to this function +** restores the default behaviour. 
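+**
+** A minimal sketch of a custom handler, assuming plain rename(2) semantics
+** are sufficient (this mirrors the default behaviour; the name xMyRename
+** is illustrative only):
+**
+**     static int xMyRename(void *pArg, const char *zOld, const char *zNew){
+**       return rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK;
+**     }
+**     ...
+**     sqlite3rbu_rename_handler(pRbu, 0, xMyRename);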
+*/ +SQLITE_API void sqlite3rbu_rename_handler( + sqlite3rbu *pRbu, + void *pArg, + int (*xRename)(void *pArg, const char *zOld, const char *zNew) +); + + /* ** Create an RBU VFS named zName that accesses the underlying file-system ** via existing VFS zParent. Or, if the zParent parameter is passed NULL, @@ -211781,6 +222401,8 @@ struct sqlite3rbu { int nPagePerSector; /* Pages per sector for pTargetFd */ i64 iOalSz; i64 nPhaseOneStep; + void *pRenameArg; + int (*xRename)(void*, const char*, const char*); /* The following state variables are used as part of the incremental ** checkpoint stage (eStage==RBU_STAGE_CKPT). See comments surrounding @@ -214169,7 +224791,7 @@ static void rbuOpenDatabase(sqlite3rbu *p, sqlite3 *dbMain, int *pbRetry){ sqlite3_file_control(p->dbRbu, "main", SQLITE_FCNTL_RBUCNT, (void*)p); if( p->zState==0 ){ const char *zFile = sqlite3_db_filename(p->dbRbu, "main"); - p->zState = rbuMPrintf(p, "file://%s-vacuum?modeof=%s", zFile, zFile); + p->zState = rbuMPrintf(p, "file:///%s-vacuum?modeof=%s", zFile, zFile); } } @@ -214417,11 +225039,11 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){ ** no-ops. These locks will not be released until the connection ** is closed. ** - ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL + ** * Attempting to xSync() the database file causes an SQLITE_NOTICE ** error. ** ** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the - ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[] + ** checkpoint below fails with SQLITE_NOTICE, and leaves the aFrame[] ** array populated with a set of (frame -> page) mappings. Because the ** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy ** data from the wal file into the database file according to the @@ -214431,7 +225053,7 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){ int rc2; p->eStage = RBU_STAGE_CAPTURE; rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0); - if( rc2!=SQLITE_INTERNAL ) p->rc = rc2; + if( rc2!=SQLITE_NOTICE ) p->rc = rc2; } if( p->rc==SQLITE_OK && p->nFrame>0 ){ @@ -214477,7 +225099,7 @@ static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){ if( pRbu->mLock!=mReq ){ pRbu->rc = SQLITE_BUSY; - return SQLITE_INTERNAL; + return SQLITE_NOTICE_RBU; } pRbu->pgsz = iAmt; @@ -214527,6 +225149,11 @@ static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){ p->rc = pDb->pMethods->xWrite(pDb, p->aBuf, p->pgsz, iOff); } +/* +** This value is copied from the definition of ZIPVFS_CTRL_FILE_POINTER +** in zipvfs.h. +*/ +#define RBU_ZIPVFS_CTRL_FILE_POINTER 230439 /* ** Take an EXCLUSIVE lock on the database file. 
Return SQLITE_OK if @@ -214535,9 +225162,20 @@ static void rbuCheckpointFrame(sqlite3rbu *p, RbuFrame *pFrame){ static int rbuLockDatabase(sqlite3 *db){ int rc = SQLITE_OK; sqlite3_file *fd = 0; - sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); - if( fd->pMethods ){ + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + if( fd ){ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); + if( rc==SQLITE_OK ){ + rc = fd->pMethods->xUnlock(fd, SQLITE_LOCK_NONE); + } + sqlite3_file_control(db, "main", RBU_ZIPVFS_CTRL_FILE_POINTER, &fd); + }else{ + sqlite3_file_control(db, "main", SQLITE_FCNTL_FILE_POINTER, &fd); + } + + if( rc==SQLITE_OK && fd->pMethods ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_SHARED); if( rc==SQLITE_OK ){ rc = fd->pMethods->xLock(fd, SQLITE_LOCK_EXCLUSIVE); @@ -214629,32 +225267,7 @@ static void rbuMoveOalFile(sqlite3rbu *p){ } if( p->rc==SQLITE_OK ){ -#if defined(_WIN32_WCE) - { - LPWSTR zWideOal; - LPWSTR zWideWal; - - zWideOal = rbuWinUtf8ToUnicode(zOal); - if( zWideOal ){ - zWideWal = rbuWinUtf8ToUnicode(zWal); - if( zWideWal ){ - if( MoveFileW(zWideOal, zWideWal) ){ - p->rc = SQLITE_OK; - }else{ - p->rc = SQLITE_IOERR; - } - sqlite3_free(zWideWal); - }else{ - p->rc = SQLITE_IOERR_NOMEM; - } - sqlite3_free(zWideOal); - }else{ - p->rc = SQLITE_IOERR_NOMEM; - } - } -#else - p->rc = rename(zOal, zWal) ? SQLITE_IOERR : SQLITE_OK; -#endif + p->rc = p->xRename(p->pRenameArg, zOal, zWal); } if( p->rc!=SQLITE_OK @@ -215241,7 +225854,8 @@ static void rbuSetupOal(sqlite3rbu *p, RbuState *pState){ static void rbuDeleteOalFile(sqlite3rbu *p){ char *zOal = rbuMPrintf(p, "%s-oal", p->zTarget); if( zOal ){ - sqlite3_vfs *pVfs = sqlite3_vfs_find(0); + sqlite3_vfs *pVfs = 0; + sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_VFS_POINTER, &pVfs); assert( pVfs && p->rc==SQLITE_OK && p->zErrmsg==0 ); pVfs->xDelete(pVfs, zOal, 0); sqlite3_free(zOal); @@ -215393,6 +226007,7 @@ static sqlite3rbu *openRbuHandle( /* Create the custom VFS. */ memset(p, 0, sizeof(sqlite3rbu)); + sqlite3rbu_rename_handler(p, 0, 0); rbuCreateVfs(p); /* Open the target, RBU and state databases */ @@ -215784,6 +226399,54 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){ return rc; } +/* +** Default xRename callback for RBU. +*/ +static int xDefaultRename(void *pArg, const char *zOld, const char *zNew){ + int rc = SQLITE_OK; +#if defined(_WIN32_WCE) + { + LPWSTR zWideOld; + LPWSTR zWideNew; + + zWideOld = rbuWinUtf8ToUnicode(zOld); + if( zWideOld ){ + zWideNew = rbuWinUtf8ToUnicode(zNew); + if( zWideNew ){ + if( MoveFileW(zWideOld, zWideNew) ){ + rc = SQLITE_OK; + }else{ + rc = SQLITE_IOERR; + } + sqlite3_free(zWideNew); + }else{ + rc = SQLITE_IOERR_NOMEM; + } + sqlite3_free(zWideOld); + }else{ + rc = SQLITE_IOERR_NOMEM; + } + } +#else + rc = rename(zOld, zNew) ? SQLITE_IOERR : SQLITE_OK; +#endif + return rc; +} + +SQLITE_API void sqlite3rbu_rename_handler( + sqlite3rbu *pRbu, + void *pArg, + int (*xRename)(void *pArg, const char *zOld, const char *zNew) +){ + if( xRename ){ + pRbu->xRename = xRename; + pRbu->pRenameArg = pArg; + }else{ + pRbu->xRename = xDefaultRename; + pRbu->pRenameArg = 0; + } +} + /************************************************************************** ** Beginning of RBU VFS shim methods. The VFS shim modifies the behaviour ** of a standard VFS in the following ways: @@ -215840,7 +226503,7 @@ SQLITE_API int sqlite3rbu_savestate(sqlite3rbu *p){ ** database file are recorded. 
xShmLock() calls to unlock the same ** locks are no-ops (so that once obtained, these locks are never ** relinquished). Finally, calls to xSync() on the target database -** file fail with SQLITE_INTERNAL errors. +** file fail with SQLITE_NOTICE errors. */ static void rbuUnlockShm(rbu_file *p){ @@ -215949,9 +226612,12 @@ static int rbuVfsClose(sqlite3_file *pFile){ sqlite3_free(p->zDel); if( p->openFlags & SQLITE_OPEN_MAIN_DB ){ + const sqlite3_io_methods *pMeth = p->pReal->pMethods; rbuMainlistRemove(p); rbuUnlockShm(p); - p->pReal->pMethods->xShmUnmap(p->pReal, 0); + if( pMeth->iVersion>1 && pMeth->xShmUnmap ){ + pMeth->xShmUnmap(p->pReal, 0); + } } else if( (p->openFlags & SQLITE_OPEN_DELETEONCLOSE) && p->pRbu ){ rbuUpdateTempSize(p, 0); @@ -216119,7 +226785,7 @@ static int rbuVfsSync(sqlite3_file *pFile, int flags){ rbu_file *p = (rbu_file *)pFile; if( p->pRbu && p->pRbu->eStage==RBU_STAGE_CAPTURE ){ if( p->openFlags & SQLITE_OPEN_MAIN_DB ){ - return SQLITE_INTERNAL; + return SQLITE_NOTICE_RBU; } return SQLITE_OK; } @@ -216410,6 +227076,25 @@ static int rbuVfsOpen( rbuVfsShmUnmap, /* xShmUnmap */ 0, 0 /* xFetch, xUnfetch */ }; + static sqlite3_io_methods rbuvfs_io_methods1 = { + 1, /* iVersion */ + rbuVfsClose, /* xClose */ + rbuVfsRead, /* xRead */ + rbuVfsWrite, /* xWrite */ + rbuVfsTruncate, /* xTruncate */ + rbuVfsSync, /* xSync */ + rbuVfsFileSize, /* xFileSize */ + rbuVfsLock, /* xLock */ + rbuVfsUnlock, /* xUnlock */ + rbuVfsCheckReservedLock, /* xCheckReservedLock */ + rbuVfsFileControl, /* xFileControl */ + rbuVfsSectorSize, /* xSectorSize */ + rbuVfsDeviceCharacteristics, /* xDeviceCharacteristics */ + 0, 0, 0, 0, 0, 0 + }; + + + rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs; sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs; rbu_file *pFd = (rbu_file *)pFile; @@ -216464,10 +227149,15 @@ static int rbuVfsOpen( rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, oflags, pOutFlags); } if( pFd->pReal->pMethods ){ + const sqlite3_io_methods *pMeth = pFd->pReal->pMethods; /* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods ** pointer and, if the file is a main database file, link it into the ** mutex protected linked list of all such files. */ - pFile->pMethods = &rbuvfs_io_methods; + if( pMeth->iVersion<2 || pMeth->xShmLock==0 ){ + pFile->pMethods = &rbuvfs_io_methods1; + }else{ + pFile->pMethods = &rbuvfs_io_methods; + } if( flags & SQLITE_OPEN_MAIN_DB ){ rbuMainlistAdd(pFd); } @@ -216900,6 +227590,7 @@ static int statConnect( StatTable *pTab = 0; int rc = SQLITE_OK; int iDb; + (void)pAux; if( argc>=4 ){ Token nm; @@ -216953,6 +227644,7 @@ static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ int iSchema = -1; int iName = -1; int iAgg = -1; + (void)tab; /* Look for a valid schema=? constraint. If found, change the idxNum to ** 1 and request the value of that constraint be sent to xFilter. 
And @@ -217478,6 +228170,8 @@ static int statFilter( int iArg = 0; /* Count of argv[] parameters used so far */ int rc = SQLITE_OK; /* Result of this operation */ const char *zName = 0; /* Only provide analysis of this table */ + (void)argc; + (void)idxStr; statResetCsr(pCsr); sqlite3_finalize(pCsr->pStmt); @@ -217561,16 +228255,16 @@ static int statColumn( } break; case 4: /* ncell */ - sqlite3_result_int(ctx, pCsr->nCell); + sqlite3_result_int64(ctx, pCsr->nCell); break; case 5: /* payload */ - sqlite3_result_int(ctx, pCsr->nPayload); + sqlite3_result_int64(ctx, pCsr->nPayload); break; case 6: /* unused */ - sqlite3_result_int(ctx, pCsr->nUnused); + sqlite3_result_int64(ctx, pCsr->nUnused); break; case 7: /* mx_payload */ - sqlite3_result_int(ctx, pCsr->nMxPayload); + sqlite3_result_int64(ctx, pCsr->nMxPayload); break; case 8: /* pgoffset */ if( !pCsr->isAgg ){ @@ -217578,7 +228272,7 @@ static int statColumn( } break; case 9: /* pgsize */ - sqlite3_result_int(ctx, pCsr->szPage); + sqlite3_result_int64(ctx, pCsr->szPage); break; case 10: { /* schema */ sqlite3 *db = sqlite3_context_db_handle(ctx); @@ -217628,7 +228322,8 @@ SQLITE_PRIVATE int sqlite3DbstatRegister(sqlite3 *db){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; return sqlite3_create_module(db, "dbstat", &dbstat_module, 0); } @@ -217712,8 +228407,13 @@ static int dbpageConnect( ){ DbpageTable *pTab = 0; int rc = SQLITE_OK; + (void)pAux; + (void)argc; + (void)argv; + (void)pzErr; sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY); + sqlite3_vtab_config(db, SQLITE_VTAB_USES_ALL_SCHEMAS); rc = sqlite3_declare_vtab(db, "CREATE TABLE x(pgno INTEGER PRIMARY KEY, data BLOB, schema HIDDEN)"); if( rc==SQLITE_OK ){ @@ -217750,6 +228450,7 @@ static int dbpageDisconnect(sqlite3_vtab *pVtab){ static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ int i; int iPlan = 0; + (void)tab; /* If there is a schema= constraint, it must be honored. Report a ** ridiculously large estimated cost if the schema= constraint is @@ -217796,7 +228497,6 @@ static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){ ){ pIdxInfo->orderByConsumed = 1; } - sqlite3VtabUsesAllSchemas(pIdxInfo); return SQLITE_OK; } @@ -217865,6 +228565,8 @@ static int dbpageFilter( sqlite3 *db = pTab->db; Btree *pBt; + (void)idxStr; + /* Default setting is no rows of result */ pCsr->pgno = 1; pCsr->mxPgno = 0; @@ -217879,7 +228581,7 @@ static int dbpageFilter( pCsr->iDb = 0; } pBt = db->aDb[pCsr->iDb].pBt; - if( pBt==0 ) return SQLITE_OK; + if( NEVER(pBt==0) ) return SQLITE_OK; pCsr->pPager = sqlite3BtreePager(pBt); pCsr->szPage = sqlite3BtreeGetPageSize(pBt); pCsr->mxPgno = sqlite3BtreeLastPage(pBt); @@ -217914,12 +228616,18 @@ static int dbpageColumn( } case 1: { /* data */ DbPage *pDbPage = 0; - rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); - if( rc==SQLITE_OK ){ - sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, - SQLITE_TRANSIENT); + if( pCsr->pgno==((PENDING_BYTE/pCsr->szPage)+1) ){ + /* The pending byte page. Assume it is zeroed out. Attempting to + ** request this page from the page is an SQLITE_CORRUPT error. 
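The dbpage change above substitutes a zeroblob for the pending-byte page instead of surfacing SQLITE_CORRUPT, and dbpageColumn() now propagates its real return code. A sketch of reading pages through the virtual table, assuming a build with SQLITE_ENABLE_DBPAGE_VTAB (dumpPages is an illustrative helper):

#include <stdio.h>
#include <sqlite3.h>   /* built with SQLITE_ENABLE_DBPAGE_VTAB */

/* Dump the size of each page of the main database. The pending-byte page
** is now returned as a zeroblob rather than failing with SQLITE_CORRUPT. */
static int dumpPages(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db,
      "SELECT pgno, length(data) FROM sqlite_dbpage('main')", -1, &pStmt, 0);
  while( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("page %lld: %d bytes\n",
        (long long)sqlite3_column_int64(pStmt, 0),
        sqlite3_column_int(pStmt, 1));
  }
  return sqlite3_finalize(pStmt);
}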
*/ + sqlite3_result_zeroblob(ctx, pCsr->szPage); + }else{ + rc = sqlite3PagerGet(pCsr->pPager, pCsr->pgno, (DbPage**)&pDbPage, 0); + if( rc==SQLITE_OK ){ + sqlite3_result_blob(ctx, sqlite3PagerGetData(pDbPage), pCsr->szPage, + SQLITE_TRANSIENT); + } + sqlite3PagerUnref(pDbPage); } - sqlite3PagerUnref(pDbPage); break; } default: { /* schema */ @@ -217928,7 +228636,7 @@ static int dbpageColumn( break; } } - return SQLITE_OK; + return rc; } static int dbpageRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ @@ -217954,6 +228662,7 @@ static int dbpageUpdate( Pager *pPager; int szPage; + (void)pRowid; if( pTab->db->flags & SQLITE_Defensive ){ zErr = "read-only"; goto update_fail; @@ -217963,18 +228672,20 @@ static int dbpageUpdate( goto update_fail; } pgno = sqlite3_value_int(argv[0]); - if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){ + if( sqlite3_value_type(argv[0])==SQLITE_NULL + || (Pgno)sqlite3_value_int(argv[1])!=pgno + ){ zErr = "cannot insert"; goto update_fail; } zSchema = (const char*)sqlite3_value_text(argv[4]); - iDb = zSchema ? sqlite3FindDbName(pTab->db, zSchema) : -1; - if( iDb<0 ){ + iDb = ALWAYS(zSchema) ? sqlite3FindDbName(pTab->db, zSchema) : -1; + if( NEVER(iDb<0) ){ zErr = "no such schema"; goto update_fail; } pBt = pTab->db->aDb[iDb].pBt; - if( pgno<1 || pBt==0 || pgno>sqlite3BtreeLastPage(pBt) ){ + if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){ zErr = "bad page number"; goto update_fail; } @@ -217988,11 +228699,12 @@ static int dbpageUpdate( pPager = sqlite3BtreePager(pBt); rc = sqlite3PagerGet(pPager, pgno, (DbPage**)&pDbPage, 0); if( rc==SQLITE_OK ){ - rc = sqlite3PagerWrite(pDbPage); - if( rc==SQLITE_OK ){ - memcpy(sqlite3PagerGetData(pDbPage), - sqlite3_value_blob(argv[3]), - szPage); + const void *pData = sqlite3_value_blob(argv[3]); + assert( pData!=0 || pTab->db->mallocFailed ); + if( pData + && (rc = sqlite3PagerWrite(pDbPage))==SQLITE_OK + ){ + memcpy(sqlite3PagerGetData(pDbPage), pData, szPage); } } sqlite3PagerUnref(pDbPage); @@ -218014,7 +228726,7 @@ static int dbpageBegin(sqlite3_vtab *pVtab){ int i; for(i=0; inDb; i++){ Btree *pBt = db->aDb[i].pBt; - if( pBt ) sqlite3BtreeBeginTrans(pBt, 1, 0); + if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0); } return SQLITE_OK; } @@ -218048,7 +228760,8 @@ SQLITE_PRIVATE int sqlite3DbpageRegister(sqlite3 *db){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; return sqlite3_create_module(db, "sqlite_dbpage", &dbpage_module, 0); } @@ -218085,6 +228798,8 @@ typedef struct SessionInput SessionInput; # endif #endif +#define SESSIONS_ROWID "_rowid_" + static int sessions_strm_chunk_size = SESSIONS_STRM_CHUNK_SIZE; typedef struct SessionHook SessionHook; @@ -218106,6 +228821,7 @@ struct sqlite3_session { int bEnable; /* True if currently recording */ int bIndirect; /* True if all changes are indirect */ int bAutoAttach; /* True to auto-attach tables */ + int bImplicitPK; /* True to handle tables with implicit PK */ int rc; /* Non-zero if an error has occurred */ void *pFilterCtx; /* First argument to pass to xTableFilter */ int (*xTableFilter)(void *pCtx, const char *zTab); @@ -218176,17 +228892,32 @@ struct sqlite3_changeset_iter { ** The data associated with each hash-table entry is a structure containing ** a subset of the initial values that the modified row contained at the ** start of the session. Or no initial values if the row was inserted. 
+** +** pDfltStmt: +** This is only used by the sqlite3changegroup_xxx() APIs, not by +** regular sqlite3_session objects. It is a SELECT statement that +** selects the default value for each table column. For example, +** if the table is +** +** CREATE TABLE xx(a DEFAULT 1, b, c DEFAULT 'abc') +** +** then this variable is the compiled version of: +** +** SELECT 1, NULL, 'abc' */ struct SessionTable { SessionTable *pNext; char *zName; /* Local name of table */ int nCol; /* Number of columns in table zName */ int bStat1; /* True if this is sqlite_stat1 */ + int bRowid; /* True if this table uses rowid for PK */ const char **azCol; /* Column names */ + const char **azDflt; /* Default value expressions */ u8 *abPK; /* Array of primary key flags */ int nEntry; /* Total number of entries in hash table */ int nChange; /* Size of apChange[] array */ SessionChange **apChange; /* Hash table buckets */ + sqlite3_stmt *pDfltStmt; }; /* @@ -218355,6 +229086,7 @@ struct SessionTable { struct SessionChange { u8 op; /* One of UPDATE, DELETE, INSERT */ u8 bIndirect; /* True if this change is "indirect" */ + u16 nRecordField; /* Number of fields in aRecord[] */ int nMaxSize; /* Max size of eventual changeset record */ int nRecord; /* Number of bytes in buffer aRecord[] */ u8 *aRecord; /* Buffer containing old.* record */ @@ -218380,7 +229112,7 @@ static int sessionVarintLen(int iVal){ ** Read a varint value from aBuf[] into *piVal. Return the number of ** bytes read. */ -static int sessionVarintGet(u8 *aBuf, int *piVal){ +static int sessionVarintGet(const u8 *aBuf, int *piVal){ return getVarint32(aBuf, *piVal); } @@ -218574,6 +229306,7 @@ static unsigned int sessionHashAppendType(unsigned int h, int eType){ */ static int sessionPreupdateHash( sqlite3_session *pSession, /* Session object that owns pTab */ + i64 iRowid, SessionTable *pTab, /* Session table handle */ int bNew, /* True to hash the new.* PK */ int *piHash, /* OUT: Hash value */ @@ -218582,48 +229315,53 @@ static int sessionPreupdateHash( unsigned int h = 0; /* Hash value to return */ int i; /* Used to iterate through columns */ - assert( *pbNullPK==0 ); - assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); - for(i=0; inCol; i++){ - if( pTab->abPK[i] ){ - int rc; - int eType; - sqlite3_value *pVal; - - if( bNew ){ - rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); - }else{ - rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); - } - if( rc!=SQLITE_OK ) return rc; + if( pTab->bRowid ){ + assert( pTab->nCol-1==pSession->hook.xCount(pSession->hook.pCtx) ); + h = sessionHashAppendI64(h, iRowid); + }else{ + assert( *pbNullPK==0 ); + assert( pTab->nCol==pSession->hook.xCount(pSession->hook.pCtx) ); + for(i=0; inCol; i++){ + if( pTab->abPK[i] ){ + int rc; + int eType; + sqlite3_value *pVal; - eType = sqlite3_value_type(pVal); - h = sessionHashAppendType(h, eType); - if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ - i64 iVal; - if( eType==SQLITE_INTEGER ){ - iVal = sqlite3_value_int64(pVal); + if( bNew ){ + rc = pSession->hook.xNew(pSession->hook.pCtx, i, &pVal); }else{ - double rVal = sqlite3_value_double(pVal); - assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); - memcpy(&iVal, &rVal, 8); + rc = pSession->hook.xOld(pSession->hook.pCtx, i, &pVal); } - h = sessionHashAppendI64(h, iVal); - }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ - const u8 *z; - int n; - if( eType==SQLITE_TEXT ){ - z = (const u8 *)sqlite3_value_text(pVal); + if( rc!=SQLITE_OK ) return rc; + + eType = sqlite3_value_type(pVal); + h = 
sessionHashAppendType(h, eType); + if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_value_int64(pVal); + }else{ + double rVal = sqlite3_value_double(pVal); + assert( sizeof(iVal)==8 && sizeof(rVal)==8 ); + memcpy(&iVal, &rVal, 8); + } + h = sessionHashAppendI64(h, iVal); + }else if( eType==SQLITE_TEXT || eType==SQLITE_BLOB ){ + const u8 *z; + int n; + if( eType==SQLITE_TEXT ){ + z = (const u8 *)sqlite3_value_text(pVal); + }else{ + z = (const u8 *)sqlite3_value_blob(pVal); + } + n = sqlite3_value_bytes(pVal); + if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; + h = sessionHashAppendBlob(h, n, z); }else{ - z = (const u8 *)sqlite3_value_blob(pVal); + assert( eType==SQLITE_NULL ); + assert( pTab->bStat1==0 || i!=1 ); + *pbNullPK = 1; } - n = sqlite3_value_bytes(pVal); - if( !z && (eType!=SQLITE_BLOB || n>0) ) return SQLITE_NOMEM; - h = sessionHashAppendBlob(h, n, z); - }else{ - assert( eType==SQLITE_NULL ); - assert( pTab->bStat1==0 || i!=1 ); - *pbNullPK = 1; } } } @@ -218637,9 +229375,11 @@ static int sessionPreupdateHash( ** Return the number of bytes of space occupied by the value (including ** the type byte). */ -static int sessionSerialLen(u8 *a){ - int e = *a; +static int sessionSerialLen(const u8 *a){ + int e; int n; + assert( a!=0 ); + e = *a; if( e==0 || e==0xFF ) return 1; if( e==SQLITE_NULL ) return 1; if( e==SQLITE_INTEGER || e==SQLITE_FLOAT ) return 9; @@ -218906,6 +229646,7 @@ static int sessionMergeUpdate( */ static int sessionPreupdateEqual( sqlite3_session *pSession, /* Session object that owns SessionTable */ + i64 iRowid, /* Rowid value if pTab->bRowid */ SessionTable *pTab, /* Table associated with change */ SessionChange *pChange, /* Change to compare to */ int op /* Current pre-update operation */ @@ -218913,6 +229654,11 @@ static int sessionPreupdateEqual( int iCol; /* Used to iterate through columns */ u8 *a = pChange->aRecord; /* Cursor used to scan change record */ + if( pTab->bRowid ){ + if( a[0]!=SQLITE_INTEGER ) return 0; + return sessionGetI64(&a[1])==iRowid; + } + assert( op==SQLITE_INSERT || op==SQLITE_UPDATE || op==SQLITE_DELETE ); for(iCol=0; iColnCol; iCol++){ if( !pTab->abPK[iCol] ){ @@ -218935,6 +229681,7 @@ static int sessionPreupdateEqual( rc = pSession->hook.xOld(pSession->hook.pCtx, iCol, &pVal); } assert( rc==SQLITE_OK ); + (void)rc; /* Suppress warning about unused variable */ if( sqlite3_value_type(pVal)!=eType ) return 0; /* A SessionChange object never has a NULL value in a PK column */ @@ -219037,13 +229784,14 @@ static int sessionGrowHash( ** ** For example, if the table is declared as: ** -** CREATE TABLE tbl1(w, x, y, z, PRIMARY KEY(w, z)); +** CREATE TABLE tbl1(w, x DEFAULT 'abc', y, z, PRIMARY KEY(w, z)); ** -** Then the four output variables are populated as follows: +** Then the five output variables are populated as follows: ** ** *pnCol = 4 ** *pzTab = "tbl1" ** *pazCol = {"w", "x", "y", "z"} +** *pazDflt = {NULL, 'abc', NULL, NULL} ** *pabPK = {1, 0, 0, 1} ** ** All returned buffers are part of the same single allocation, which must @@ -219057,7 +229805,9 @@ static int sessionTableInfo( int *pnCol, /* OUT: number of columns */ const char **pzTab, /* OUT: Copy of zThis */ const char ***pazCol, /* OUT: Array of column names for table */ - u8 **pabPK /* OUT: Array of booleans - true for PK col */ + const char ***pazDflt, /* OUT: Array of default value expressions */ + u8 **pabPK, /* OUT: Array of booleans - true for PK col */ + int *pbRowid /* OUT: True if only PK 
is a rowid */ ){ char *zPragma; sqlite3_stmt *pStmt; @@ -219068,10 +229818,18 @@ static int sessionTableInfo( int i; u8 *pAlloc = 0; char **azCol = 0; + char **azDflt = 0; u8 *abPK = 0; + int bRowid = 0; /* Set to true to use rowid as PK */ assert( pazCol && pabPK ); + *pazCol = 0; + *pabPK = 0; + *pnCol = 0; + if( pzTab ) *pzTab = 0; + if( pazDflt ) *pazDflt = 0; + nThis = sqlite3Strlen30(zThis); if( nThis==12 && 0==sqlite3_stricmp("sqlite_stat1", zThis) ){ rc = sqlite3_table_column_metadata(db, zDb, zThis, 0, 0, 0, 0, 0, 0); @@ -219085,50 +229843,47 @@ static int sessionTableInfo( }else if( rc==SQLITE_ERROR ){ zPragma = sqlite3_mprintf(""); }else{ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return rc; } }else{ zPragma = sqlite3_mprintf("PRAGMA '%q'.table_info('%q')", zDb, zThis); } if( !zPragma ){ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return SQLITE_NOMEM; } rc = sqlite3_prepare_v2(db, zPragma, -1, &pStmt, 0); sqlite3_free(zPragma); if( rc!=SQLITE_OK ){ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; return rc; } nByte = nThis + 1; + bRowid = (pbRowid!=0); while( SQLITE_ROW==sqlite3_step(pStmt) ){ - nByte += sqlite3_column_bytes(pStmt, 1); + nByte += sqlite3_column_bytes(pStmt, 1); /* name */ + nByte += sqlite3_column_bytes(pStmt, 4); /* dflt_value */ nDbCol++; + if( sqlite3_column_int(pStmt, 5) ) bRowid = 0; /* pk */ } + if( nDbCol==0 ) bRowid = 0; + nDbCol += bRowid; + nByte += strlen(SESSIONS_ROWID); rc = sqlite3_reset(pStmt); if( rc==SQLITE_OK ){ - nByte += nDbCol * (sizeof(const char *) + sizeof(u8) + 1); + nByte += nDbCol * (sizeof(const char *)*2 + sizeof(u8) + 1 + 1); pAlloc = sessionMalloc64(pSession, nByte); if( pAlloc==0 ){ rc = SQLITE_NOMEM; + }else{ + memset(pAlloc, 0, nByte); } } if( rc==SQLITE_OK ){ azCol = (char **)pAlloc; - pAlloc = (u8 *)&azCol[nDbCol]; + azDflt = (char**)&azCol[nDbCol]; + pAlloc = (u8 *)&azDflt[nDbCol]; abPK = (u8 *)pAlloc; pAlloc = &abPK[nDbCol]; if( pzTab ){ @@ -219138,43 +229893,57 @@ static int sessionTableInfo( } i = 0; + if( bRowid ){ + size_t nName = strlen(SESSIONS_ROWID); + memcpy(pAlloc, SESSIONS_ROWID, nName+1); + azCol[i] = (char*)pAlloc; + pAlloc += nName+1; + abPK[i] = 1; + i++; + } while( SQLITE_ROW==sqlite3_step(pStmt) ){ int nName = sqlite3_column_bytes(pStmt, 1); + int nDflt = sqlite3_column_bytes(pStmt, 4); const unsigned char *zName = sqlite3_column_text(pStmt, 1); + const unsigned char *zDflt = sqlite3_column_text(pStmt, 4); + if( zName==0 ) break; memcpy(pAlloc, zName, nName+1); azCol[i] = (char *)pAlloc; pAlloc += nName+1; + if( zDflt ){ + memcpy(pAlloc, zDflt, nDflt+1); + azDflt[i] = (char *)pAlloc; + pAlloc += nDflt+1; + }else{ + azDflt[i] = 0; + } abPK[i] = sqlite3_column_int(pStmt, 5); i++; } rc = sqlite3_reset(pStmt); - } /* If successful, populate the output variables. Otherwise, zero them and ** free any allocation made. An error code will be returned in this case. */ if( rc==SQLITE_OK ){ - *pazCol = (const char **)azCol; + *pazCol = (const char**)azCol; + if( pazDflt ) *pazDflt = (const char**)azDflt; *pabPK = abPK; *pnCol = nDbCol; }else{ - *pazCol = 0; - *pabPK = 0; - *pnCol = 0; - if( pzTab ) *pzTab = 0; sessionFree(pSession, azCol); } + if( pbRowid ) *pbRowid = bRowid; sqlite3_finalize(pStmt); return rc; } /* -** This function is only called from within a pre-update handler for a -** write to table pTab, part of session pSession. 
If this is the first -** write to this table, initalize the SessionTable.nCol, azCol[] and -** abPK[] arrays accordingly. +** This function is called to initialize the SessionTable.nCol, azCol[] +** abPK[] and azDflt[] members of SessionTable object pTab. If these +** fields are already initilialized, this function is a no-op. ** ** If an error occurs, an error code is stored in sqlite3_session.rc and ** non-zero returned. Or, if no error occurs but the table has no primary @@ -219182,14 +229951,22 @@ static int sessionTableInfo( ** indicate that updates on this table should be ignored. SessionTable.abPK ** is set to NULL in this case. */ -static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ +static int sessionInitTable( + sqlite3_session *pSession, /* Optional session handle */ + SessionTable *pTab, /* Table object to initialize */ + sqlite3 *db, /* Database handle to read schema from */ + const char *zDb /* Name of db - "main", "temp" etc. */ +){ + int rc = SQLITE_OK; + if( pTab->nCol==0 ){ u8 *abPK; assert( pTab->azCol==0 || pTab->abPK==0 ); - pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, - pTab->zName, &pTab->nCol, 0, &pTab->azCol, &abPK + rc = sessionTableInfo(pSession, db, zDb, + pTab->zName, &pTab->nCol, 0, &pTab->azCol, &pTab->azDflt, &abPK, + ((pSession==0 || pSession->bImplicitPK) ? &pTab->bRowid : 0) ); - if( pSession->rc==SQLITE_OK ){ + if( rc==SQLITE_OK ){ int i; for(i=0; inCol; i++){ if( abPK[i] ){ @@ -219201,14 +229978,321 @@ static int sessionInitTable(sqlite3_session *pSession, SessionTable *pTab){ pTab->bStat1 = 1; } - if( pSession->bEnableSize ){ + if( pSession && pSession->bEnableSize ){ pSession->nMaxChangesetSize += ( 1 + sessionVarintLen(pTab->nCol) + pTab->nCol + strlen(pTab->zName)+1 ); } } } - return (pSession->rc || pTab->abPK==0); + + if( pSession ){ + pSession->rc = rc; + return (rc || pTab->abPK==0); + } + return rc; +} + +/* +** Re-initialize table object pTab. +*/ +static int sessionReinitTable(sqlite3_session *pSession, SessionTable *pTab){ + int nCol = 0; + const char **azCol = 0; + const char **azDflt = 0; + u8 *abPK = 0; + int bRowid = 0; + + assert( pSession->rc==SQLITE_OK ); + + pSession->rc = sessionTableInfo(pSession, pSession->db, pSession->zDb, + pTab->zName, &nCol, 0, &azCol, &azDflt, &abPK, + (pSession->bImplicitPK ? &bRowid : 0) + ); + if( pSession->rc==SQLITE_OK ){ + if( pTab->nCol>nCol || pTab->bRowid!=bRowid ){ + pSession->rc = SQLITE_SCHEMA; + }else{ + int ii; + int nOldCol = pTab->nCol; + for(ii=0; iinCol ){ + if( pTab->abPK[ii]!=abPK[ii] ){ + pSession->rc = SQLITE_SCHEMA; + } + }else if( abPK[ii] ){ + pSession->rc = SQLITE_SCHEMA; + } + } + + if( pSession->rc==SQLITE_OK ){ + const char **a = pTab->azCol; + pTab->azCol = azCol; + pTab->nCol = nCol; + pTab->azDflt = azDflt; + pTab->abPK = abPK; + azCol = a; + } + if( pSession->bEnableSize ){ + pSession->nMaxChangesetSize += (nCol - nOldCol); + pSession->nMaxChangesetSize += sessionVarintLen(nCol); + pSession->nMaxChangesetSize -= sessionVarintLen(nOldCol); + } + } + } + + sqlite3_free((char*)azCol); + return pSession->rc; +} + +/* +** Session-change object (*pp) contains an old.* record with fewer than +** nCol fields. This function updates it with the default values for +** the missing fields. 
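sessionReinitTable() and sessionUpdateOneChange() together let a session survive ALTER TABLE ADD COLUMN: the table metadata is re-read and older change records are padded with the new column's DEFAULT. A sketch of the behaviour this enables, assuming a sessions-enabled build (SQLITE_ENABLE_SESSION plus SQLITE_ENABLE_PREUPDATE_HOOK) and a pre-existing table t1(a PRIMARY KEY, b):

#include <sqlite3.h>

/* Record changes across an ALTER TABLE ADD COLUMN. Entries already in the
** session hash table are padded with the DEFAULT for the new column. */
static int changesetAcrossAlter(sqlite3 *db, int *pnOut, void **ppOut){
  sqlite3_session *pSess = 0;
  int rc = sqlite3session_create(db, "main", &pSess);
  if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSess, "t1");
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db,
        "UPDATE t1 SET b=b+1 WHERE a=1;"
        "ALTER TABLE t1 ADD COLUMN c DEFAULT 'xyz';"
        "UPDATE t1 SET c='abc' WHERE a=2;", 0, 0, 0);
  }
  if( rc==SQLITE_OK ) rc = sqlite3session_changeset(pSess, pnOut, ppOut);
  sqlite3session_delete(pSess);
  return rc;
}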
+*/ +static void sessionUpdateOneChange( + sqlite3_session *pSession, /* For memory accounting */ + int *pRc, /* IN/OUT: Error code */ + SessionChange **pp, /* IN/OUT: Change object to update */ + int nCol, /* Number of columns now in table */ + sqlite3_stmt *pDflt /* SELECT */ +){ + SessionChange *pOld = *pp; + + while( pOld->nRecordFieldnRecordField; + int eType = sqlite3_column_type(pDflt, iField); + switch( eType ){ + case SQLITE_NULL: + nIncr = 1; + break; + case SQLITE_INTEGER: + case SQLITE_FLOAT: + nIncr = 9; + break; + default: { + int n = sqlite3_column_bytes(pDflt, iField); + nIncr = 1 + sessionVarintLen(n) + n; + assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB ); + break; + } + } + + nByte = nIncr + (sizeof(SessionChange) + pOld->nRecord); + pNew = sessionMalloc64(pSession, nByte); + if( pNew==0 ){ + *pRc = SQLITE_NOMEM; + return; + }else{ + memcpy(pNew, pOld, sizeof(SessionChange)); + pNew->aRecord = (u8*)&pNew[1]; + memcpy(pNew->aRecord, pOld->aRecord, pOld->nRecord); + pNew->aRecord[pNew->nRecord++] = (u8)eType; + switch( eType ){ + case SQLITE_INTEGER: { + i64 iVal = sqlite3_column_int64(pDflt, iField); + sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal); + pNew->nRecord += 8; + break; + } + + case SQLITE_FLOAT: { + double rVal = sqlite3_column_double(pDflt, iField); + i64 iVal = 0; + memcpy(&iVal, &rVal, sizeof(rVal)); + sessionPutI64(&pNew->aRecord[pNew->nRecord], iVal); + pNew->nRecord += 8; + break; + } + + case SQLITE_TEXT: { + int n = sqlite3_column_bytes(pDflt, iField); + const char *z = (const char*)sqlite3_column_text(pDflt, iField); + pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n); + memcpy(&pNew->aRecord[pNew->nRecord], z, n); + pNew->nRecord += n; + break; + } + + case SQLITE_BLOB: { + int n = sqlite3_column_bytes(pDflt, iField); + const u8 *z = (const u8*)sqlite3_column_blob(pDflt, iField); + pNew->nRecord += sessionVarintPut(&pNew->aRecord[pNew->nRecord], n); + memcpy(&pNew->aRecord[pNew->nRecord], z, n); + pNew->nRecord += n; + break; + } + + default: + assert( eType==SQLITE_NULL ); + break; + } + + sessionFree(pSession, pOld); + *pp = pOld = pNew; + pNew->nRecordField++; + pNew->nMaxSize += nIncr; + if( pSession ){ + pSession->nMaxChangesetSize += nIncr; + } + } + } +} + +/* +** Ensure that there is room in the buffer to append nByte bytes of data. +** If not, use sqlite3_realloc() to grow the buffer so that there is. +** +** If successful, return zero. Otherwise, if an OOM condition is encountered, +** set *pRc to SQLITE_NOMEM and return non-zero. +*/ +static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){ +#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1) + i64 nReq = p->nBuf + nByte; + if( *pRc==SQLITE_OK && nReq>p->nAlloc ){ + u8 *aNew; + i64 nNew = p->nAlloc ? p->nAlloc : 128; + + do { + nNew = nNew*2; + }while( nNewSESSION_MAX_BUFFER_SZ ){ + nNew = SESSION_MAX_BUFFER_SZ; + if( nNewaBuf, nNew); + if( 0==aNew ){ + *pRc = SQLITE_NOMEM; + }else{ + p->aBuf = aNew; + p->nAlloc = nNew; + } + } + return (*pRc!=SQLITE_OK); +} + + +/* +** This function is a no-op if *pRc is other than SQLITE_OK when it is +** called. Otherwise, append a string to the buffer. All bytes in the string +** up to (but not including) the nul-terminator are written to the buffer. +** +** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before +** returning. 
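sessionBufferGrow(), relocated by this hunk, grows its allocation by doubling until nBuf+nByte fits, capped at SESSION_MAX_BUFFER_SZ. The same amortized-doubling pattern in isolation, as a standalone sketch (Buf and bufAppend are illustrative names, not session internals):

#include <stdlib.h>
#include <string.h>

typedef struct Buf { unsigned char *a; long n; long nAlloc; } Buf;

/* Grow-by-doubling append: amortized O(1) per byte. The session code adds
** an upper bound (SESSION_MAX_BUFFER_SZ) on top of this basic scheme. */
static int bufAppend(Buf *p, const void *pData, long nData){
  if( p->n+nData > p->nAlloc ){
    long nNew = p->nAlloc ? p->nAlloc : 128;
    unsigned char *aNew;
    while( nNew < p->n+nData ) nNew = nNew*2;
    aNew = realloc(p->a, nNew);
    if( aNew==0 ) return 1;     /* OOM */
    p->a = aNew;
    p->nAlloc = nNew;
  }
  memcpy(&p->a[p->n], pData, nData);
  p->n += nData;
  return 0;
}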
+*/ +static void sessionAppendStr( + SessionBuffer *p, + const char *zStr, + int *pRc +){ + int nStr = sqlite3Strlen30(zStr); + if( 0==sessionBufferGrow(p, nStr+1, pRc) ){ + memcpy(&p->aBuf[p->nBuf], zStr, nStr); + p->nBuf += nStr; + p->aBuf[p->nBuf] = 0x00; + } +} + +/* +** Format a string using printf() style formatting and then append it to the +** buffer using sessionAppendString(). +*/ +static void sessionAppendPrintf( + SessionBuffer *p, /* Buffer to append to */ + int *pRc, + const char *zFmt, + ... +){ + if( *pRc==SQLITE_OK ){ + char *zApp = 0; + va_list ap; + va_start(ap, zFmt); + zApp = sqlite3_vmprintf(zFmt, ap); + if( zApp==0 ){ + *pRc = SQLITE_NOMEM; + }else{ + sessionAppendStr(p, zApp, pRc); + } + va_end(ap); + sqlite3_free(zApp); + } +} + +/* +** Prepare a statement against database handle db that SELECTs a single +** row containing the default values for each column in table pTab. For +** example, if pTab is declared as: +** +** CREATE TABLE pTab(a PRIMARY KEY, b DEFAULT 123, c DEFAULT 'abcd'); +** +** Then this function prepares and returns the SQL statement: +** +** SELECT NULL, 123, 'abcd'; +*/ +static int sessionPrepareDfltStmt( + sqlite3 *db, /* Database handle */ + SessionTable *pTab, /* Table to prepare statement for */ + sqlite3_stmt **ppStmt /* OUT: Statement handle */ +){ + SessionBuffer sql = {0,0,0}; + int rc = SQLITE_OK; + const char *zSep = " "; + int ii = 0; + + *ppStmt = 0; + sessionAppendPrintf(&sql, &rc, "SELECT"); + for(ii=0; iinCol; ii++){ + const char *zDflt = pTab->azDflt[ii] ? pTab->azDflt[ii] : "NULL"; + sessionAppendPrintf(&sql, &rc, "%s%s", zSep, zDflt); + zSep = ", "; + } + if( rc==SQLITE_OK ){ + rc = sqlite3_prepare_v2(db, (const char*)sql.aBuf, -1, ppStmt, 0); + } + sqlite3_free(sql.aBuf); + + return rc; +} + +/* +** Table pTab has one or more existing change-records with old.* records +** with fewer than pTab->nCol columns. This function updates all such +** change-records with the default values for the missing columns. 
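For comparison, the same "SELECT of defaults" can be approximated from application code using PRAGMA table_info, whose column 4 is dflt_value. This is a hedged sketch of the idea, not the internal implementation, and it assumes the named table exists in "main":

#include <sqlite3.h>

/* Build "SELECT <dflt>, <dflt>, ..." for table zTab, using NULL where a
** column has no DEFAULT clause (table_info column 4 is dflt_value). */
static int prepareDefaults(sqlite3 *db, const char *zTab, sqlite3_stmt **pp){
  sqlite3_stmt *pInfo = 0;
  char *zSql = sqlite3_mprintf("SELECT");
  const char *zSep = " ";
  char *zInfo = sqlite3_mprintf("PRAGMA main.table_info(%Q)", zTab);
  int rc = zInfo ? sqlite3_prepare_v2(db, zInfo, -1, &pInfo, 0) : SQLITE_NOMEM;
  sqlite3_free(zInfo);
  while( rc==SQLITE_OK && zSql && sqlite3_step(pInfo)==SQLITE_ROW ){
    const char *zDflt = (const char*)sqlite3_column_text(pInfo, 4);
    zSql = sqlite3_mprintf("%z%s%s", zSql, zSep, zDflt ? zDflt : "NULL");
    zSep = ", ";
  }
  sqlite3_finalize(pInfo);
  if( rc==SQLITE_OK ){
    rc = zSql ? sqlite3_prepare_v2(db, zSql, -1, pp, 0) : SQLITE_NOMEM;
  }
  sqlite3_free(zSql);
  return rc;
}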
+*/ +static int sessionUpdateChanges(sqlite3_session *pSession, SessionTable *pTab){ + sqlite3_stmt *pStmt = 0; + int rc = pSession->rc; + + rc = sessionPrepareDfltStmt(pSession->db, pTab, &pStmt); + if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + int ii = 0; + SessionChange **pp = 0; + for(ii=0; iinChange; ii++){ + for(pp=&pTab->apChange[ii]; *pp; pp=&((*pp)->pNext)){ + if( (*pp)->nRecordField!=pTab->nCol ){ + sessionUpdateOneChange(pSession, &rc, pp, pTab->nCol, pStmt); + } + } + } + } + + pSession->rc = rc; + rc = sqlite3_finalize(pStmt); + if( pSession->rc==SQLITE_OK ) pSession->rc = rc; + return pSession->rc; } /* @@ -219259,6 +230343,7 @@ static int sessionUpdateMaxSize( ){ i64 nNew = 2; if( pC->op==SQLITE_INSERT ){ + if( pTab->bRowid ) nNew += 9; if( op!=SQLITE_DELETE ){ int ii; for(ii=0; iinCol; ii++){ @@ -219275,12 +230360,16 @@ static int sessionUpdateMaxSize( }else{ int ii; u8 *pCsr = pC->aRecord; - for(ii=0; iinCol; ii++){ + if( pTab->bRowid ){ + nNew += 9 + 1; + pCsr += 9; + } + for(ii=pTab->bRowid; iinCol; ii++){ int bChanged = 1; int nOld = 0; int eType; sqlite3_value *p = 0; - pSession->hook.xNew(pSession->hook.pCtx, ii, &p); + pSession->hook.xNew(pSession->hook.pCtx, ii-pTab->bRowid, &p); if( p==0 ){ return SQLITE_NOMEM; } @@ -219359,22 +230448,29 @@ static int sessionUpdateMaxSize( */ static void sessionPreupdateOneChange( int op, /* One of SQLITE_UPDATE, INSERT, DELETE */ + i64 iRowid, sqlite3_session *pSession, /* Session object pTab is attached to */ SessionTable *pTab /* Table that change applies to */ ){ int iHash; int bNull = 0; int rc = SQLITE_OK; + int nExpect = 0; SessionStat1Ctx stat1 = {{0,0,0,0,0},0}; if( pSession->rc ) return; /* Load table details if required */ - if( sessionInitTable(pSession, pTab) ) return; + if( sessionInitTable(pSession, pTab, pSession->db, pSession->zDb) ) return; /* Check the number of columns in this xPreUpdate call matches the ** number of columns in the table. */ - if( pTab->nCol!=pSession->hook.xCount(pSession->hook.pCtx) ){ + nExpect = pSession->hook.xCount(pSession->hook.pCtx); + if( (pTab->nCol-pTab->bRowid)nCol-pTab->bRowid)!=nExpect ){ pSession->rc = SQLITE_SCHEMA; return; } @@ -219407,14 +230503,16 @@ static void sessionPreupdateOneChange( /* Calculate the hash-key for this change. If the primary key of the row ** includes a NULL value, exit early. Such changes are ignored by the ** session module. */ - rc = sessionPreupdateHash(pSession, pTab, op==SQLITE_INSERT, &iHash, &bNull); + rc = sessionPreupdateHash( + pSession, iRowid, pTab, op==SQLITE_INSERT, &iHash, &bNull + ); if( rc!=SQLITE_OK ) goto error_out; if( bNull==0 ){ /* Search the hash table for an existing record for this row. 
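sessionPreupdateOneChange() now receives the rowid directly from the preupdate hook, which is how implicit-PK (rowid) tables are keyed. For reference, the shape of the underlying public hook, assuming a build with SQLITE_ENABLE_PREUPDATE_HOOK (onPreupdate is an illustrative callback):

#include <stdio.h>
#include <sqlite3.h>   /* built with SQLITE_ENABLE_PREUPDATE_HOOK */

/* Minimal preupdate callback: iKey1 is the old rowid and iKey2 the new one.
** For INSERT and DELETE the two are equal, as the hunk above asserts. */
static void onPreupdate(
  void *pCtx, sqlite3 *db, int op,
  const char *zDb, const char *zTab,
  sqlite3_int64 iKey1, sqlite3_int64 iKey2
){
  (void)pCtx;
  printf("%s on %s.%s rowid %lld -> %lld (%d columns)\n",
      op==SQLITE_INSERT ? "INSERT" : op==SQLITE_DELETE ? "DELETE" : "UPDATE",
      zDb, zTab, (long long)iKey1, (long long)iKey2,
      sqlite3_preupdate_count(db));
}

static void installHook(sqlite3 *db){
  sqlite3_preupdate_hook(db, onPreupdate, 0);
}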
*/ SessionChange *pC; for(pC=pTab->apChange[iHash]; pC; pC=pC->pNext){ - if( sessionPreupdateEqual(pSession, pTab, pC, op) ) break; + if( sessionPreupdateEqual(pSession, iRowid, pTab, pC, op) ) break; } if( pC==0 ){ @@ -219429,7 +230527,7 @@ static void sessionPreupdateOneChange( /* Figure out how large an allocation is required */ nByte = sizeof(SessionChange); - for(i=0; inCol; i++){ + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ TESTONLY(int trc = ) pSession->hook.xOld(pSession->hook.pCtx, i, &p); @@ -219444,9 +230542,12 @@ static void sessionPreupdateOneChange( rc = sessionSerializeValue(0, p, &nByte); if( rc!=SQLITE_OK ) goto error_out; } + if( pTab->bRowid ){ + nByte += 9; /* Size of rowid field - an integer */ + } /* Allocate the change object */ - pC = (SessionChange *)sessionMalloc64(pSession, nByte); + pC = (SessionChange*)sessionMalloc64(pSession, nByte); if( !pC ){ rc = SQLITE_NOMEM; goto error_out; @@ -219460,7 +230561,12 @@ static void sessionPreupdateOneChange( ** required values and encodings have already been cached in memory. ** It is not possible for an OOM to occur in this block. */ nByte = 0; - for(i=0; inCol; i++){ + if( pTab->bRowid ){ + pC->aRecord[0] = SQLITE_INTEGER; + sessionPutI64(&pC->aRecord[1], iRowid); + nByte = 9; + } + for(i=0; i<(pTab->nCol-pTab->bRowid); i++){ sqlite3_value *p = 0; if( op!=SQLITE_INSERT ){ pSession->hook.xOld(pSession->hook.pCtx, i, &p); @@ -219474,6 +230580,7 @@ static void sessionPreupdateOneChange( if( pSession->bIndirect || pSession->hook.xDepth(pSession->hook.pCtx) ){ pC->bIndirect = 1; } + pC->nRecordField = pTab->nCol; pC->nRecord = nByte; pC->op = op; pC->pNext = pTab->apChange[iHash]; @@ -219559,6 +230666,8 @@ static void xPreUpdate( int nDb = sqlite3Strlen30(zDb); assert( sqlite3_mutex_held(db->mutex) ); + (void)iKey1; + (void)iKey2; for(pSession=(sqlite3_session *)pCtx; pSession; pSession=pSession->pNext){ SessionTable *pTab; @@ -219573,9 +230682,10 @@ static void xPreUpdate( pSession->rc = sessionFindTable(pSession, zName, &pTab); if( pTab ){ assert( pSession->rc==SQLITE_OK ); - sessionPreupdateOneChange(op, pSession, pTab); + assert( op==SQLITE_UPDATE || iKey1==iKey2 ); + sessionPreupdateOneChange(op, iKey1, pSession, pTab); if( op==SQLITE_UPDATE ){ - sessionPreupdateOneChange(SQLITE_INSERT, pSession, pTab); + sessionPreupdateOneChange(SQLITE_INSERT, iKey2, pSession, pTab); } } } @@ -219614,6 +230724,7 @@ static void sessionPreupdateHooks( typedef struct SessionDiffCtx SessionDiffCtx; struct SessionDiffCtx { sqlite3_stmt *pStmt; + int bRowid; int nOldOff; }; @@ -219622,19 +230733,20 @@ struct SessionDiffCtx { */ static int sessionDiffOld(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->nOldOff+p->bRowid); return SQLITE_OK; } static int sessionDiffNew(void *pCtx, int iVal, sqlite3_value **ppVal){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - *ppVal = sqlite3_column_value(p->pStmt, iVal); + *ppVal = sqlite3_column_value(p->pStmt, iVal+p->bRowid); return SQLITE_OK; } static int sessionDiffCount(void *pCtx){ SessionDiffCtx *p = (SessionDiffCtx*)pCtx; - return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt); + return (p->nOldOff ? 
p->nOldOff : sqlite3_column_count(p->pStmt)) - p->bRowid; } static int sessionDiffDepth(void *pCtx){ + (void)pCtx; return 0; } @@ -219708,17 +230820,18 @@ static char *sessionExprCompareOther( } static char *sessionSelectFindNew( - int nCol, const char *zDb1, /* Pick rows in this db only */ const char *zDb2, /* But not in this one */ + int bRowid, const char *zTbl, /* Table name */ const char *zExpr ){ + const char *zSel = (bRowid ? SESSIONS_ROWID ", *" : "*"); char *zRet = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\" WHERE NOT EXISTS (" + "SELECT %s FROM \"%w\".\"%w\" WHERE NOT EXISTS (" " SELECT 1 FROM \"%w\".\"%w\" WHERE %s" ")", - zDb1, zTbl, zDb2, zTbl, zExpr + zSel, zDb1, zTbl, zDb2, zTbl, zExpr ); return zRet; } @@ -219732,7 +230845,9 @@ static int sessionDiffFindNew( char *zExpr ){ int rc = SQLITE_OK; - char *zStmt = sessionSelectFindNew(pTab->nCol, zDb1, zDb2, pTab->zName,zExpr); + char *zStmt = sessionSelectFindNew( + zDb1, zDb2, pTab->bRowid, pTab->zName, zExpr + ); if( zStmt==0 ){ rc = SQLITE_NOMEM; @@ -219743,8 +230858,10 @@ static int sessionDiffFindNew( SessionDiffCtx *pDiffCtx = (SessionDiffCtx*)pSession->hook.pCtx; pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = 0; + pDiffCtx->bRowid = pTab->bRowid; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(op, pSession, pTab); + i64 iRowid = (pTab->bRowid ? sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(op, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); } @@ -219754,6 +230871,27 @@ static int sessionDiffFindNew( return rc; } +/* +** Return a comma-separated list of the fully-qualified (with both database +** and table name) column names from table pTab. e.g. +** +** "main"."t1"."a", "main"."t1"."b", "main"."t1"."c" +*/ +static char *sessionAllCols( + const char *zDb, + SessionTable *pTab +){ + int ii; + char *zRet = 0; + for(ii=0; iinCol; ii++){ + zRet = sqlite3_mprintf("%z%s\"%w\".\"%w\".\"%w\"", + zRet, (zRet ? ", " : ""), zDb, pTab->zName, pTab->azCol[ii] + ); + if( !zRet ) break; + } + return zRet; +} + static int sessionDiffFindModified( sqlite3_session *pSession, SessionTable *pTab, @@ -219768,11 +230906,13 @@ static int sessionDiffFindModified( if( zExpr2==0 ){ rc = SQLITE_NOMEM; }else{ + char *z1 = sessionAllCols(pSession->zDb, pTab); + char *z2 = sessionAllCols(zFrom, pTab); char *zStmt = sqlite3_mprintf( - "SELECT * FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", - pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 + "SELECT %s,%s FROM \"%w\".\"%w\", \"%w\".\"%w\" WHERE %s AND (%z)", + z1, z2, pSession->zDb, pTab->zName, zFrom, pTab->zName, zExpr, zExpr2 ); - if( zStmt==0 ){ + if( zStmt==0 || z1==0 || z2==0 ){ rc = SQLITE_NOMEM; }else{ sqlite3_stmt *pStmt; @@ -219783,12 +230923,15 @@ static int sessionDiffFindModified( pDiffCtx->pStmt = pStmt; pDiffCtx->nOldOff = pTab->nCol; while( SQLITE_ROW==sqlite3_step(pStmt) ){ - sessionPreupdateOneChange(SQLITE_UPDATE, pSession, pTab); + i64 iRowid = (pTab->bRowid ? 
sqlite3_column_int64(pStmt, 0) : 0); + sessionPreupdateOneChange(SQLITE_UPDATE, iRowid, pSession, pTab); } rc = sqlite3_finalize(pStmt); } - sqlite3_free(zStmt); } + sqlite3_free(zStmt); + sqlite3_free(z1); + sqlite3_free(z2); } return rc; @@ -219817,7 +230960,7 @@ SQLITE_API int sqlite3session_diff( /* Locate and if necessary initialize the target table object */ rc = sessionFindTable(pSession, zTbl, &pTo); if( pTo==0 ) goto diff_out; - if( sessionInitTable(pSession, pTo) ){ + if( sessionInitTable(pSession, pTo, pSession->db, pSession->zDb) ){ rc = pSession->rc; goto diff_out; } @@ -219827,9 +230970,12 @@ SQLITE_API int sqlite3session_diff( int bHasPk = 0; int bMismatch = 0; int nCol; /* Columns in zFrom.zTbl */ + int bRowid = 0; u8 *abPK; const char **azCol = 0; - rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, &abPK); + rc = sessionTableInfo(0, db, zFrom, zTbl, &nCol, 0, &azCol, 0, &abPK, + pSession->bImplicitPK ? &bRowid : 0 + ); if( rc==SQLITE_OK ){ if( pTo->nCol!=nCol ){ bMismatch = 1; @@ -219942,6 +231088,7 @@ static void sessionDeleteTable(sqlite3_session *pSession, SessionTable *pList){ sessionFree(pSession, p); } } + sqlite3_finalize(pTab->pDfltStmt); sessionFree(pSession, (char*)pTab->azCol); /* cast works around VC++ bug */ sessionFree(pSession, pTab->apChange); sessionFree(pSession, pTab); @@ -219974,9 +231121,7 @@ SQLITE_API void sqlite3session_delete(sqlite3_session *pSession){ ** associated hash-tables. */ sessionDeleteTable(pSession, pSession->pTable); - /* Assert that all allocations have been freed and then free the - ** session object itself. */ - assert( pSession->nMalloc==0 ); + /* Free the session object. */ sqlite3_free(pSession); } @@ -220047,48 +231192,6 @@ SQLITE_API int sqlite3session_attach( return rc; } -/* -** Ensure that there is room in the buffer to append nByte bytes of data. -** If not, use sqlite3_realloc() to grow the buffer so that there is. -** -** If successful, return zero. Otherwise, if an OOM condition is encountered, -** set *pRc to SQLITE_NOMEM and return non-zero. -*/ -static int sessionBufferGrow(SessionBuffer *p, i64 nByte, int *pRc){ -#define SESSION_MAX_BUFFER_SZ (0x7FFFFF00 - 1) - i64 nReq = p->nBuf + nByte; - if( *pRc==SQLITE_OK && nReq>p->nAlloc ){ - u8 *aNew; - i64 nNew = p->nAlloc ? p->nAlloc : 128; - - do { - nNew = nNew*2; - }while( nNewSESSION_MAX_BUFFER_SZ ){ - nNew = SESSION_MAX_BUFFER_SZ; - if( nNewaBuf, nNew); - if( 0==aNew ){ - *pRc = SQLITE_NOMEM; - }else{ - p->aBuf = aNew; - p->nAlloc = nNew; - } - } - return (*pRc!=SQLITE_OK); -} - /* ** Append the value passed as the second argument to the buffer passed ** as the first. @@ -220157,26 +231260,6 @@ static void sessionAppendBlob( } } -/* -** This function is a no-op if *pRc is other than SQLITE_OK when it is -** called. Otherwise, append a string to the buffer. All bytes in the string -** up to (but not including) the nul-terminator are written to the buffer. -** -** If an OOM condition is encountered, set *pRc to SQLITE_NOMEM before -** returning. -*/ -static void sessionAppendStr( - SessionBuffer *p, - const char *zStr, - int *pRc -){ - int nStr = sqlite3Strlen30(zStr); - if( 0==sessionBufferGrow(p, nStr, pRc) ){ - memcpy(&p->aBuf[p->nBuf], zStr, nStr); - p->nBuf += nStr; - } -} - /* ** This function is a no-op if *pRc is other than SQLITE_OK when it is ** called. 
Otherwise, append the string representation of integer iVal @@ -220209,7 +231292,7 @@ static void sessionAppendIdent( const char *zStr, /* String to quote, escape and append */ int *pRc /* IN/OUT: Error code */ ){ - int nStr = sqlite3Strlen30(zStr)*2 + 2 + 1; + int nStr = sqlite3Strlen30(zStr)*2 + 2 + 2; if( 0==sessionBufferGrow(p, nStr, pRc) ){ char *zOut = (char *)&p->aBuf[p->nBuf]; const char *zIn = zStr; @@ -220220,6 +231303,7 @@ static void sessionAppendIdent( } *zOut++ = '"'; p->nBuf = (int)((u8 *)zOut - p->aBuf); + p->aBuf[p->nBuf] = 0x00; } } @@ -220355,7 +231439,7 @@ static int sessionAppendUpdate( /* If at least one field has been modified, this is not a no-op. */ if( bChanged ) bNoop = 0; - /* Add a field to the old.* record. This is omitted if this modules is + /* Add a field to the old.* record. This is omitted if this module is ** currently generating a patchset. */ if( bPatchset==0 ){ if( bChanged || abPK[i] ){ @@ -220444,12 +231528,20 @@ static int sessionAppendDelete( ** Formulate and prepare a SELECT statement to retrieve a row from table ** zTab in database zDb based on its primary key. i.e. ** -** SELECT * FROM zDb.zTab WHERE pk1 = ? AND pk2 = ? AND ... +** SELECT *, FROM zDb.zTab WHERE (pk1, pk2,...) IS (?1, ?2,...) +** +** where is: +** +** 1 AND (?A OR ?1 IS ) AND ... +** +** for each non-pk . */ static int sessionSelectStmt( sqlite3 *db, /* Database handle */ + int bIgnoreNoop, const char *zDb, /* Database name */ const char *zTab, /* Table name */ + int bRowid, int nCol, /* Number of columns in table */ const char **azCol, /* Names of table columns */ u8 *abPK, /* PRIMARY KEY array */ @@ -220457,8 +231549,50 @@ static int sessionSelectStmt( ){ int rc = SQLITE_OK; char *zSql = 0; + const char *zSep = ""; + const char *zCols = bRowid ? SESSIONS_ROWID ", *" : "*"; int nSql = -1; + int i; + + SessionBuffer nooptest = {0, 0, 0}; + SessionBuffer pkfield = {0, 0, 0}; + SessionBuffer pkvar = {0, 0, 0}; + + sessionAppendStr(&nooptest, ", 1", &rc); + if( 0==sqlite3_stricmp("sqlite_stat1", zTab) ){ + sessionAppendStr(&nooptest, " AND (?6 OR ?3 IS stat)", &rc); + sessionAppendStr(&pkfield, "tbl, idx", &rc); + sessionAppendStr(&pkvar, + "?1, (CASE WHEN ?2=X'' THEN NULL ELSE ?2 END)", &rc + ); + zCols = "tbl, ?2, stat"; + }else{ + for(i=0; ipTable; rc==SQLITE_OK && pTab; pTab=pTab->pNext){ if( pTab->nEntry ){ const char *zName = pTab->zName; - int nCol = 0; /* Number of columns in table */ - u8 *abPK = 0; /* Primary key array */ - const char **azCol = 0; /* Table columns */ int i; /* Used to iterate through hash buckets */ sqlite3_stmt *pSel = 0; /* SELECT statement to query table pTab */ int nRewind = buf.nBuf; /* Initial size of write buffer */ int nNoop; /* Size of buffer after writing tbl header */ + int nOldCol = pTab->nCol; /* Check the table schema is still Ok. 
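sqlite3session_diff(), updated above to pass default-value and rowid information through sessionTableInfo(), loads into a session the changes needed to make a table in another attached database match the session database's copy. A sketch, assuming "main" and "aux" both contain a compatible table t1 already attached to the session:

#include <sqlite3.h>

/* Load into the session the changes required to make aux.t1 match main.t1,
** then extract them as a changeset. */
static int diffIntoSession(sqlite3 *db, int *pnOut, void **ppOut){
  sqlite3_session *pSess = 0;
  char *zErr = 0;
  int rc = sqlite3session_create(db, "main", &pSess);
  if( rc==SQLITE_OK ) rc = sqlite3session_attach(pSess, "t1");
  if( rc==SQLITE_OK ) rc = sqlite3session_diff(pSess, "aux", "t1", &zErr);
  if( rc==SQLITE_OK ) rc = sqlite3session_changeset(pSess, pnOut, ppOut);
  sqlite3_free(zErr);
  sqlite3session_delete(pSess);
  return rc;
}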
*/ - rc = sessionTableInfo(0, db, pSession->zDb, zName, &nCol, 0,&azCol,&abPK); - if( !rc && (pTab->nCol!=nCol || memcmp(abPK, pTab->abPK, nCol)) ){ - rc = SQLITE_SCHEMA; + rc = sessionReinitTable(pSession, pTab); + if( rc==SQLITE_OK && pTab->nCol!=nOldCol ){ + rc = sessionUpdateChanges(pSession, pTab); } /* Write a table header */ @@ -220650,8 +231785,9 @@ static int sessionGenerateChangeset( /* Build and compile a statement to execute: */ if( rc==SQLITE_OK ){ - rc = sessionSelectStmt( - db, pSession->zDb, zName, nCol, azCol, abPK, &pSel); + rc = sessionSelectStmt(db, 0, pSession->zDb, + zName, pTab->bRowid, pTab->nCol, pTab->azCol, pTab->abPK, &pSel + ); } nNoop = buf.nBuf; @@ -220659,22 +231795,22 @@ static int sessionGenerateChangeset( SessionChange *p; /* Used to iterate through changes */ for(p=pTab->apChange[i]; rc==SQLITE_OK && p; p=p->pNext){ - rc = sessionSelectBind(pSel, nCol, abPK, p); + rc = sessionSelectBind(pSel, pTab->nCol, pTab->abPK, p); if( rc!=SQLITE_OK ) continue; if( sqlite3_step(pSel)==SQLITE_ROW ){ if( p->op==SQLITE_INSERT ){ int iCol; sessionAppendByte(&buf, SQLITE_INSERT, &rc); sessionAppendByte(&buf, p->bIndirect, &rc); - for(iCol=0; iColnCol; iCol++){ sessionAppendCol(&buf, pSel, iCol, &rc); } }else{ - assert( abPK!=0 ); /* Because sessionSelectStmt() returned ok */ - rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, abPK); + assert( pTab->abPK!=0 ); + rc = sessionAppendUpdate(&buf, bPatchset, pSel, p, pTab->abPK); } }else if( p->op!=SQLITE_INSERT ){ - rc = sessionAppendDelete(&buf, bPatchset, p, nCol, abPK); + rc = sessionAppendDelete(&buf, bPatchset, p, pTab->nCol,pTab->abPK); } if( rc==SQLITE_OK ){ rc = sqlite3_reset(pSel); @@ -220699,7 +231835,6 @@ static int sessionGenerateChangeset( if( buf.nBuf==nNoop ){ buf.nBuf = nRewind; } - sqlite3_free((char*)azCol); /* cast works around VC++ bug */ } } @@ -220734,7 +231869,7 @@ SQLITE_API int sqlite3session_changeset( int rc; if( pnChangeset==0 || ppChangeset==0 ) return SQLITE_MISUSE; - rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset,ppChangeset); + rc = sessionGenerateChangeset(pSession, 0, 0, 0, pnChangeset, ppChangeset); assert( rc || pnChangeset==0 || pSession->bEnableSize==0 || *pnChangeset<=pSession->nMaxChangesetSize ); @@ -220852,6 +231987,19 @@ SQLITE_API int sqlite3session_object_config(sqlite3_session *pSession, int op, v break; } + case SQLITE_SESSION_OBJCONFIG_ROWID: { + int iArg = *(int*)pArg; + if( iArg>=0 ){ + if( pSession->pTable ){ + rc = SQLITE_MISUSE; + }else{ + pSession->bImplicitPK = (iArg!=0); + } + } + *(int*)pArg = pSession->bImplicitPK; + break; + } + default: rc = SQLITE_MISUSE; } @@ -221110,15 +232258,19 @@ static int sessionReadRecord( } } if( eType==SQLITE_INTEGER || eType==SQLITE_FLOAT ){ - sqlite3_int64 v = sessionGetI64(aVal); - if( eType==SQLITE_INTEGER ){ - sqlite3VdbeMemSetInt64(apOut[i], v); + if( (pIn->nData-pIn->iNext)<8 ){ + rc = SQLITE_CORRUPT_BKPT; }else{ - double d; - memcpy(&d, &v, 8); - sqlite3VdbeMemSetDouble(apOut[i], d); + sqlite3_int64 v = sessionGetI64(aVal); + if( eType==SQLITE_INTEGER ){ + sqlite3VdbeMemSetInt64(apOut[i], v); + }else{ + double d; + memcpy(&d, &v, 8); + sqlite3VdbeMemSetDouble(apOut[i], d); + } + pIn->iNext += 8; } - pIn->iNext += 8; } } } @@ -221387,6 +232539,22 @@ static int sessionChangesetNextOne( if( p->op==SQLITE_INSERT ) p->op = SQLITE_DELETE; else if( p->op==SQLITE_DELETE ) p->op = SQLITE_INSERT; } + + /* If this is an UPDATE that is part of a changeset, then check that + ** there are no fields in the old.* record that are not 
(a) PK fields, + ** or (b) also present in the new.* record. + ** + ** Such records are technically corrupt, but the rebaser was at one + ** point generating them. Under most circumstances this is benign, but + ** can cause spurious SQLITE_RANGE errors when applying the changeset. */ + if( p->bPatchset==0 && p->op==SQLITE_UPDATE){ + for(i=0; inCol; i++){ + if( p->abPK[i]==0 && p->apValue[i+p->nCol]==0 ){ + sqlite3ValueFree(p->apValue[i]); + p->apValue[i] = 0; + } + } + } } return SQLITE_ROW; @@ -221824,6 +232992,8 @@ struct SessionApplyCtx { SessionBuffer rebase; /* Rebase information (if any) here */ u8 bRebaseStarted; /* If table header is already in rebase */ u8 bRebase; /* True to collect rebase information */ + u8 bIgnoreNoop; /* True to ignore no-op conflicts */ + int bRowid; }; /* Number of prepared UPDATE statements to cache. */ @@ -222074,8 +233244,10 @@ static int sessionSelectRow( const char *zTab, /* Table name */ SessionApplyCtx *p /* Session changeset-apply context */ ){ - return sessionSelectStmt( - db, "main", zTab, p->nCol, p->azCol, p->abPK, &p->pSelect); + /* TODO */ + return sessionSelectStmt(db, p->bIgnoreNoop, + "main", zTab, p->bRowid, p->nCol, p->azCol, p->abPK, &p->pSelect + ); } /* @@ -222233,22 +233405,34 @@ static int sessionBindRow( ** UPDATE, bind values from the old.* record. */ static int sessionSeekToRow( - sqlite3 *db, /* Database handle */ sqlite3_changeset_iter *pIter, /* Changeset iterator */ - u8 *abPK, /* Primary key flags array */ - sqlite3_stmt *pSelect /* SELECT statement from sessionSelectRow() */ + SessionApplyCtx *p ){ + sqlite3_stmt *pSelect = p->pSelect; int rc; /* Return code */ int nCol; /* Number of columns in table */ int op; /* Changset operation (SQLITE_UPDATE etc.) */ const char *zDummy; /* Unused */ + sqlite3_clear_bindings(pSelect); sqlite3changeset_op(pIter, &zDummy, &nCol, &op, 0); rc = sessionBindRow(pIter, op==SQLITE_INSERT ? sqlite3changeset_new : sqlite3changeset_old, - nCol, abPK, pSelect + nCol, p->abPK, pSelect ); + if( op!=SQLITE_DELETE && p->bIgnoreNoop ){ + int ii; + for(ii=0; rc==SQLITE_OK && iiabPK[ii]==0 ){ + sqlite3_value *pVal = 0; + sqlite3changeset_new(pIter, ii, &pVal); + sqlite3_bind_int(pSelect, ii+1+nCol, (pVal==0)); + if( pVal ) rc = sessionBindValue(pSelect, ii+1, pVal); + } + } + } + if( rc==SQLITE_OK ){ rc = sqlite3_step(pSelect); if( rc!=SQLITE_ROW ) rc = sqlite3_reset(pSelect); @@ -222363,16 +233547,22 @@ static int sessionConflictHandler( /* Bind the new.* PRIMARY KEY values to the SELECT statement. */ if( pbReplace ){ - rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); }else{ rc = SQLITE_OK; } if( rc==SQLITE_ROW ){ /* There exists another row with the new.* primary key. 
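With bIgnoreNoop set, sessionConflictHandler() above resolves no-op conflicts itself by returning SQLITE_CHANGESET_OMIT; everything else still reaches the application's conflict handler. A typical handler, sketched:

#include <sqlite3.h>

/* Conflict handler: take the incoming change on DATA and CONFLICT clashes,
** skip everything else. (REPLACE is only legal for those two cases.) */
static int onConflict(void *pCtx, int eConflict, sqlite3_changeset_iter *pIter){
  (void)pCtx;
  (void)pIter;
  switch( eConflict ){
    case SQLITE_CHANGESET_DATA:
    case SQLITE_CHANGESET_CONFLICT:
      return SQLITE_CHANGESET_REPLACE;  /* overwrite the local row */
    default:
      return SQLITE_CHANGESET_OMIT;     /* skip this change */
  }
}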
*/ - pIter->pConflict = p->pSelect; - res = xConflict(pCtx, eType, pIter); - pIter->pConflict = 0; + if( p->bIgnoreNoop + && sqlite3_column_int(p->pSelect, sqlite3_column_count(p->pSelect)-1) + ){ + res = SQLITE_CHANGESET_OMIT; + }else{ + pIter->pConflict = p->pSelect; + res = xConflict(pCtx, eType, pIter); + pIter->pConflict = 0; + } rc = sqlite3_reset(p->pSelect); }else if( rc==SQLITE_OK ){ if( p->bDeferConstraints && eType==SQLITE_CHANGESET_CONFLICT ){ @@ -222480,7 +233670,7 @@ static int sessionApplyOneOp( sqlite3_step(p->pDelete); rc = sqlite3_reset(p->pDelete); - if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 ){ + if( rc==SQLITE_OK && sqlite3_changes(p->db)==0 && p->bIgnoreNoop==0 ){ rc = sessionConflictHandler( SQLITE_CHANGESET_DATA, p, pIter, xConflict, pCtx, pbRetry ); @@ -222537,7 +233727,7 @@ static int sessionApplyOneOp( /* Check if there is a conflicting row. For sqlite_stat1, this needs ** to be done using a SELECT, as there is no PRIMARY KEY in the ** database schema to throw an exception if a duplicate is inserted. */ - rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect); + rc = sessionSeekToRow(pIter, p); if( rc==SQLITE_ROW ){ rc = SQLITE_CONSTRAINT; sqlite3_reset(p->pSelect); @@ -222714,6 +233904,7 @@ static int sessionChangesetApply( memset(&sApply, 0, sizeof(sApply)); sApply.bRebase = (ppRebase && pnRebase); sApply.bInvertConstraints = !!(flags & SQLITE_CHANGESETAPPLY_INVERT); + sApply.bIgnoreNoop = !!(flags & SQLITE_CHANGESETAPPLY_IGNORENOOP); sqlite3_mutex_enter(sqlite3_db_mutex(db)); if( (flags & SQLITE_CHANGESETAPPLY_NOSAVEPOINT)==0 ){ rc = sqlite3_exec(db, "SAVEPOINT changeset_apply", 0, 0, 0); @@ -222751,6 +233942,7 @@ static int sessionChangesetApply( sApply.bStat1 = 0; sApply.bDeferConstraints = 1; sApply.bRebaseStarted = 0; + sApply.bRowid = 0; memset(&sApply.constraints, 0, sizeof(SessionBuffer)); /* If an xFilter() callback was specified, invoke it now. If the @@ -222770,8 +233962,8 @@ static int sessionChangesetApply( int i; sqlite3changeset_pk(pIter, &abPK, 0); - rc = sessionTableInfo(0, - db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK + rc = sessionTableInfo(0, db, "main", zNew, + &sApply.nCol, &zTab, &sApply.azCol, 0, &sApply.abPK, &sApply.bRowid ); if( rc!=SQLITE_OK ) break; for(i=0; iflags & SQLITE_FkNoAction; + + if( flags & SQLITE_CHANGESETAPPLY_FKNOACTION ){ + db->flags |= ((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } + if( rc==SQLITE_OK ){ rc = sessionChangesetApply( db, pIter, xFilter, xConflict, pCtx, ppRebase, pnRebase, flags ); } + + if( (flags & SQLITE_CHANGESETAPPLY_FKNOACTION) && savedFlag==0 ){ + assert( db->flags & SQLITE_FkNoAction ); + db->flags &= ~((u64)SQLITE_FkNoAction); + db->aDb[0].pSchema->schema_cookie -= 32; + } return rc; } @@ -222995,6 +234200,9 @@ struct sqlite3_changegroup { int rc; /* Error code */ int bPatch; /* True to accumulate patchsets */ SessionTable *pList; /* List of tables in current patch */ + + sqlite3 *db; /* Configured by changegroup_schema() */ + char *zDb; /* Configured by changegroup_schema() */ }; /* @@ -223015,6 +234223,7 @@ static int sessionChangeMerge( ){ SessionChange *pNew = 0; int rc = SQLITE_OK; + assert( aRec!=0 ); if( !pExist ){ pNew = (SessionChange *)sqlite3_malloc64(sizeof(SessionChange) + nRec); @@ -223180,6 +234389,114 @@ static int sessionChangeMerge( return rc; } +/* +** Check if a changeset entry with nCol columns and the PK array passed +** as the final argument to this function is compatible with SessionTable +** pTab. If so, return 1. 
Otherwise, if they are incompatible in some way, +** return 0. +*/ +static int sessionChangesetCheckCompat( + SessionTable *pTab, + int nCol, + u8 *abPK +){ + if( pTab->azCol && nColnCol ){ + int ii; + for(ii=0; iinCol; ii++){ + u8 bPK = (ii < nCol) ? abPK[ii] : 0; + if( pTab->abPK[ii]!=bPK ) return 0; + } + return 1; + } + return (pTab->nCol==nCol && 0==memcmp(abPK, pTab->abPK, nCol)); +} + +static int sessionChangesetExtendRecord( + sqlite3_changegroup *pGrp, + SessionTable *pTab, + int nCol, + int op, + const u8 *aRec, + int nRec, + SessionBuffer *pOut +){ + int rc = SQLITE_OK; + int ii = 0; + + assert( pTab->azCol ); + assert( nColnCol ); + + pOut->nBuf = 0; + if( op==SQLITE_INSERT || (op==SQLITE_DELETE && pGrp->bPatch==0) ){ + /* Append the missing default column values to the record. */ + sessionAppendBlob(pOut, aRec, nRec, &rc); + if( rc==SQLITE_OK && pTab->pDfltStmt==0 ){ + rc = sessionPrepareDfltStmt(pGrp->db, pTab, &pTab->pDfltStmt); + } + for(ii=nCol; rc==SQLITE_OK && iinCol; ii++){ + int eType = sqlite3_column_type(pTab->pDfltStmt, ii); + sessionAppendByte(pOut, eType, &rc); + switch( eType ){ + case SQLITE_FLOAT: + case SQLITE_INTEGER: { + i64 iVal; + if( eType==SQLITE_INTEGER ){ + iVal = sqlite3_column_int64(pTab->pDfltStmt, ii); + }else{ + double rVal = sqlite3_column_int64(pTab->pDfltStmt, ii); + memcpy(&iVal, &rVal, sizeof(i64)); + } + if( SQLITE_OK==sessionBufferGrow(pOut, 8, &rc) ){ + sessionPutI64(&pOut->aBuf[pOut->nBuf], iVal); + } + break; + } + + case SQLITE_BLOB: + case SQLITE_TEXT: { + int n = sqlite3_column_bytes(pTab->pDfltStmt, ii); + sessionAppendVarint(pOut, n, &rc); + if( eType==SQLITE_TEXT ){ + const u8 *z = (const u8*)sqlite3_column_text(pTab->pDfltStmt, ii); + sessionAppendBlob(pOut, z, n, &rc); + }else{ + const u8 *z = (const u8*)sqlite3_column_blob(pTab->pDfltStmt, ii); + sessionAppendBlob(pOut, z, n, &rc); + } + break; + } + + default: + assert( eType==SQLITE_NULL ); + break; + } + } + }else if( op==SQLITE_UPDATE ){ + /* Append missing "undefined" entries to the old.* record. And, if this + ** is an UPDATE, to the new.* record as well. */ + int iOff = 0; + if( pGrp->bPatch==0 ){ + for(ii=0; iinCol-nCol); ii++){ + sessionAppendByte(pOut, 0x00, &rc); + } + } + + sessionAppendBlob(pOut, &aRec[iOff], nRec-iOff, &rc); + for(ii=0; ii<(pTab->nCol-nCol); ii++){ + sessionAppendByte(pOut, 0x00, &rc); + } + }else{ + assert( op==SQLITE_DELETE && pGrp->bPatch ); + sessionAppendBlob(pOut, aRec, nRec, &rc); + } + + return rc; +} + /* ** Add all changes in the changeset traversed by the iterator passed as ** the first argument to the changegroup hash tables. @@ -223193,6 +234510,7 @@ static int sessionChangesetToHash( int nRec; int rc = SQLITE_OK; SessionTable *pTab = 0; + SessionBuffer rec = {0, 0, 0}; while( SQLITE_ROW==sessionChangesetNext(pIter, &aRec, &nRec, 0) ){ const char *zNew; @@ -223204,6 +234522,9 @@ static int sessionChangesetToHash( SessionChange *pExist = 0; SessionChange **pp; + /* Ensure that only changesets, or only patchsets, but not a mixture + ** of both, are being combined. It is an error to try to combine a + ** changeset and a patchset. 
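The apply-path hunks above wire SQLITE_CHANGESETAPPLY_IGNORENOOP into SessionApplyCtx.bIgnoreNoop and make SQLITE_CHANGESETAPPLY_FKNOACTION temporarily behave as if all foreign keys were declared NO ACTION. Passing both flags, as a sketch (applyChanges is an illustrative wrapper):

#include <sqlite3.h>

/* Apply a changeset, resolving no-op conflicts silently and treating all
** foreign keys as NO ACTION for the duration of the apply. */
static int applyChanges(
  sqlite3 *db, int nChangeset, void *pChangeset,
  int (*xConflict)(void*,int,sqlite3_changeset_iter*)
){
  return sqlite3changeset_apply_v2(db, nChangeset, pChangeset,
      0,              /* xFilter - apply changes for every table */
      xConflict, 0,   /* conflict handler and its context pointer */
      0, 0,           /* no rebase buffer required */
      SQLITE_CHANGESETAPPLY_IGNORENOOP|SQLITE_CHANGESETAPPLY_FKNOACTION);
}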
*/ if( pGrp->pList==0 ){ pGrp->bPatch = pIter->bPatchset; }else if( pIter->bPatchset!=pGrp->bPatch ){ @@ -223236,18 +234557,38 @@ static int sessionChangesetToHash( pTab->zName = (char*)&pTab->abPK[nCol]; memcpy(pTab->zName, zNew, nNew+1); + if( pGrp->db ){ + pTab->nCol = 0; + rc = sessionInitTable(0, pTab, pGrp->db, pGrp->zDb); + if( rc ){ + assert( pTab->azCol==0 ); + sqlite3_free(pTab); + break; + } + } + /* The new object must be linked on to the end of the list, not ** simply added to the start of it. This is to ensure that the ** tables within the output of sqlite3changegroup_output() are in ** the right order. */ for(ppTab=&pGrp->pList; *ppTab; ppTab=&(*ppTab)->pNext); *ppTab = pTab; - }else if( pTab->nCol!=nCol || memcmp(pTab->abPK, abPK, nCol) ){ + } + + if( !sessionChangesetCheckCompat(pTab, nCol, abPK) ){ rc = SQLITE_SCHEMA; break; } } + if( nColnCol ){ + assert( pGrp->db ); + rc = sessionChangesetExtendRecord(pGrp, pTab, nCol, op, aRec, nRec, &rec); + if( rc ) break; + aRec = rec.aBuf; + nRec = rec.nBuf; + } + if( sessionGrowHash(0, pIter->bPatchset, pTab) ){ rc = SQLITE_NOMEM; break; @@ -223285,6 +234626,7 @@ static int sessionChangesetToHash( } } + sqlite3_free(rec.aBuf); if( rc==SQLITE_OK ) rc = pIter->rc; return rc; } @@ -223371,6 +234713,31 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp){ return rc; } +/* +** Provide a database schema to the changegroup object. +*/ +SQLITE_API int sqlite3changegroup_schema( + sqlite3_changegroup *pGrp, + sqlite3 *db, + const char *zDb +){ + int rc = SQLITE_OK; + + if( pGrp->pList || pGrp->db ){ + /* Cannot add a schema after one or more calls to sqlite3changegroup_add(), + ** or after sqlite3changegroup_schema() has already been called. */ + rc = SQLITE_MISUSE; + }else{ + pGrp->zDb = sqlite3_mprintf("%s", zDb); + if( pGrp->zDb==0 ){ + rc = SQLITE_NOMEM; + }else{ + pGrp->db = db; + } + } + return rc; +} + /* ** Add the changeset currently stored in buffer pData, size nData bytes, ** to changeset-group p. @@ -223434,6 +234801,7 @@ SQLITE_API int sqlite3changegroup_output_strm( */ SQLITE_API void sqlite3changegroup_delete(sqlite3_changegroup *pGrp){ if( pGrp ){ + sqlite3_free(pGrp->zDb); sessionDeleteTable(0, pGrp->pList); sqlite3_free(pGrp); } @@ -223583,7 +234951,7 @@ static void sessionAppendPartialUpdate( if( !pIter->abPK[i] && a1[0] ) bData = 1; memcpy(pOut, a1, n1); pOut += n1; - }else if( a2[0]!=0xFF ){ + }else if( a2[0]!=0xFF && a1[0] ){ bData = 1; memcpy(pOut, a2, n2); pOut += n2; @@ -223966,8 +235334,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -223977,8 +235348,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. 
+** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -223994,12 +235367,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -224025,6 +235399,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. ** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -224139,6 +235517,39 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. This API is not available if the specified token matches a +** prefix query term. In that case both output variables are always set +** to 0. 
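+**
+** A minimal usage sketch (pApi and pFts5 are as passed to any fts5
+** auxiliary function; error handling omitted):
+**
+**   const char *pTok = 0; int nTok = 0;
+**   int rc = pApi->xInstToken(pFts5, iIdx, 0, &pTok, &nTok);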
+** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. */ struct Fts5ExtensionApi { int iVersion; /* Currently always set to 3 */ @@ -224176,6 +235587,13 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); }; /* @@ -224370,8 +235788,8 @@ struct Fts5ExtensionApi { ** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; @@ -224419,7 +235837,7 @@ struct fts5_api { int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) ); @@ -224428,7 +235846,7 @@ struct fts5_api { int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer ); @@ -224436,7 +235854,7 @@ struct fts5_api { int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) ); @@ -224608,6 +236026,10 @@ typedef struct Fts5Config Fts5Config; ** attempt to merge together. A value of 1 sets the object to use the ** compile time default. Zero disables auto-merge altogether. ** +** bContentlessDelete: +** True if the contentless_delete option was present in the CREATE +** VIRTUAL TABLE statement. 
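+** For example:
+**
+**   CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1);
+**
+** (The option is only valid for contentless tables - see the checks
+** in sqlite3Fts5ConfigParse().)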
+** ** zContent: ** ** zContentRowid: @@ -224642,9 +236064,11 @@ struct Fts5Config { int nPrefix; /* Number of prefix indexes */ int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ + int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ + int bTokendata; /* "tokendata=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; Fts5Tokenizer *pTok; @@ -224653,6 +236077,7 @@ struct Fts5Config { int ePattern; /* FTS_PATTERN_XXX constant */ /* Values loaded from the %_config table */ + int iVersion; /* fts5 file format 'version' */ int iCookie; /* Incremented when %_config is modified */ int pgsz; /* Approximate page size used in %_data */ int nAutomerge; /* 'automerge' setting */ @@ -224661,6 +236086,8 @@ struct Fts5Config { int nHashSize; /* Bytes of memory for in-memory hash */ char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ + int bSecureDelete; /* 'secure-delete' */ + int nDeleteMerge; /* 'deletemerge' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; @@ -224670,8 +236097,11 @@ struct Fts5Config { #endif }; -/* Current expected value of %_config table 'version' field */ -#define FTS5_CURRENT_VERSION 4 +/* Current expected value of %_config table 'version' field. And +** the expected version if the 'secure-delete' option has ever been +** set on the table. */ +#define FTS5_CURRENT_VERSION 4 +#define FTS5_CURRENT_VERSION_SECUREDELETE 5 #define FTS5_CONTENT_NORMAL 0 #define FTS5_CONTENT_NONE 1 @@ -224740,7 +236170,7 @@ static void sqlite3Fts5BufferAppendPrintf(int *, Fts5Buffer*, char *zFmt, ...); static char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); #define fts5BufferZero(x) sqlite3Fts5BufferZero(x) -#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,c) +#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,(i64)c) #define fts5BufferFree(a) sqlite3Fts5BufferFree(a) #define fts5BufferAppendBlob(a,b,c,d) sqlite3Fts5BufferAppendBlob(a,b,c,d) #define fts5BufferSet(a,b,c,d) sqlite3Fts5BufferSet(a,b,c,d) @@ -224827,16 +236257,19 @@ struct Fts5IndexIter { /* ** Values used as part of the flags argument passed to IndexQuery(). */ -#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ -#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ -#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ -#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ +#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ +#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ +#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ +#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ /* The following are used internally by the fts5_index.c module. They are ** defined here only to make it easier to avoid clashes with the flags ** above. */ -#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 -#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 +#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080 +#define FTS5INDEX_QUERY_SCANONETERM 0x0100 /* ** Create/destroy an Fts5Index object. 
@@ -224905,6 +236338,10 @@ static void *sqlite3Fts5StructureRef(Fts5Index*); static void sqlite3Fts5StructureRelease(void*); static int sqlite3Fts5StructureTest(Fts5Index*, void*); +/* +** Used by xInstToken(): +*/ +static int sqlite3Fts5IterToken(Fts5IndexIter*, i64, int, int, const char**, int*); /* ** Insert or remove data to or from the index. Each time a document is @@ -224979,6 +236416,16 @@ static int sqlite3Fts5IndexReset(Fts5Index *p); static int sqlite3Fts5IndexLoadConfig(Fts5Index *p); +static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin); +static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid); + +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*); + +/* Used to populate hash tables for xInstToken in detail=none/column mode. */ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff +); + /* ** End of interface to code in fts5_index.c. **************************************************************************/ @@ -224991,7 +236438,7 @@ static int sqlite3Fts5GetVarintLen(u32 iVal); static u8 sqlite3Fts5GetVarint(const unsigned char*, u64*); static int sqlite3Fts5PutVarint(unsigned char *p, u64 v); -#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&b) +#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&(b)) #define fts5GetVarint sqlite3Fts5GetVarint #define fts5FastGetVarint32(a, iOff, nVal) { \ @@ -225063,6 +236510,11 @@ static int sqlite3Fts5HashWrite( */ static void sqlite3Fts5HashClear(Fts5Hash*); +/* +** Return true if the hash is empty, false otherwise. +*/ +static int sqlite3Fts5HashIsEmpty(Fts5Hash*); + static int sqlite3Fts5HashQuery( Fts5Hash*, /* Hash table to query */ int nPre, @@ -225079,11 +236531,13 @@ static void sqlite3Fts5HashScanNext(Fts5Hash*); static int sqlite3Fts5HashScanEof(Fts5Hash*); static void sqlite3Fts5HashScanEntry(Fts5Hash *, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ); + /* ** End of interface to code in fts5_hash.c. **************************************************************************/ @@ -225204,6 +236658,10 @@ static int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**); static int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *); +static int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*); +static int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*); +static void sqlite3Fts5ExprClearTokens(Fts5Expr*); + /******************************************* ** The fts5_expr.c API above this point is used by the other hand-written ** C code in this module. The interfaces below this point are called by @@ -225327,7 +236785,8 @@ static void sqlite3Fts5UnicodeAscii(u8*, u8*); #define FTS5_STAR 15 /* This file is automatically generated by Lemon from input grammar -** source file "fts5parse.y". */ +** source file "fts5parse.y". 
+*/ /* ** 2000-05-29 ** @@ -226917,15 +238376,19 @@ static int fts5CInstIterInit( */ typedef struct HighlightContext HighlightContext; struct HighlightContext { - CInstIter iter; /* Coalesced Instance Iterator */ - int iPos; /* Current token offset in zIn[] */ + /* Constant parameters to fts5HighlightCb() */ int iRangeStart; /* First token to include */ int iRangeEnd; /* If non-zero, last token to include */ const char *zOpen; /* Opening highlight */ const char *zClose; /* Closing highlight */ const char *zIn; /* Input text */ int nIn; /* Size of input text in bytes */ - int iOff; /* Current offset within zIn[] */ + + /* Variables modified by fts5HighlightCb() */ + CInstIter iter; /* Coalesced Instance Iterator */ + int iPos; /* Current token offset in zIn[] */ + int iOff; /* Have copied up to this offset in zIn[] */ + int bOpen; /* True if highlight is open */ char *zOut; /* Output value */ }; @@ -226958,8 +238421,8 @@ static int fts5HighlightCb( int tflags, /* Mask of FTS5_TOKEN_* flags */ const char *pToken, /* Buffer containing token */ int nToken, /* Size of token in bytes */ - int iStartOff, /* Start offset of token */ - int iEndOff /* End offset of token */ + int iStartOff, /* Start byte offset of token */ + int iEndOff /* End byte offset of token */ ){ HighlightContext *p = (HighlightContext*)pContext; int rc = SQLITE_OK; @@ -226970,35 +238433,60 @@ static int fts5HighlightCb( if( tflags & FTS5_TOKEN_COLOCATED ) return SQLITE_OK; iPos = p->iPos++; - if( p->iRangeEnd>0 ){ + if( p->iRangeEnd>=0 ){ if( iPosiRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK; if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff; } - if( iPos==p->iter.iStart ){ + /* If the parenthesis is open, and this token is not part of the current + ** phrase, and the starting byte offset of this token is past the point + ** that has currently been copied into the output buffer, close the + ** parenthesis. */ + if( p->bOpen + && (iPos<=p->iter.iStart || p->iter.iStart<0) + && iStartOff>p->iOff + ){ + fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; + } + + /* If this is the start of a new phrase, and the highlight is not open: + ** + ** * copy text from the input up to the start of the phrase, and + ** * open the highlight. 
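+  **
+  ** (For instance, with zOpen=="<b>" and a phrase starting at this
+  ** token, the input up to iStartOff is copied through verbatim and
+  ** "<b>" is then appended to the output.)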
+ */ + if( iPos==p->iter.iStart && p->bOpen==0 ){ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff); fts5HighlightAppend(&rc, p, p->zOpen, -1); p->iOff = iStartOff; + p->bOpen = 1; } if( iPos==p->iter.iEnd ){ - if( p->iRangeEnd && p->iter.iStartiRangeStart ){ + if( p->bOpen==0 ){ + assert( p->iRangeEnd>=0 ); fts5HighlightAppend(&rc, p, p->zOpen, -1); + p->bOpen = 1; } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); - fts5HighlightAppend(&rc, p, p->zClose, -1); p->iOff = iEndOff; + if( rc==SQLITE_OK ){ rc = fts5CInstIterNext(&p->iter); } } - if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){ - fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); - p->iOff = iEndOff; - if( iPos>=p->iter.iStart && iPositer.iEnd ){ + if( iPos==p->iRangeEnd ){ + if( p->bOpen ){ + if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){ + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; + } fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; } + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; } return rc; @@ -227028,9 +238516,12 @@ static void fts5HighlightFunction( memset(&ctx, 0, sizeof(HighlightContext)); ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]); ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); + ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); - - if( ctx.zIn ){ + if( rc==SQLITE_RANGE ){ + sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); + rc = SQLITE_OK; + }else if( ctx.zIn ){ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } @@ -227038,6 +238529,9 @@ static void fts5HighlightFunction( if( rc==SQLITE_OK ){ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); + } fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff); if( rc==SQLITE_OK ){ @@ -227213,6 +238707,7 @@ static void fts5SnippetFunction( iCol = sqlite3_value_int(apVal[0]); ctx.zOpen = fts5ValueToText(apVal[1]); ctx.zClose = fts5ValueToText(apVal[2]); + ctx.iRangeEnd = -1; zEllips = fts5ValueToText(apVal[3]); nToken = sqlite3_value_int(apVal[4]); @@ -227315,6 +238810,9 @@ static void fts5SnippetFunction( if( rc==SQLITE_OK ){ rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); + } if( ctx.iRangeEnd>=(nColSize-1) ){ fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff); }else{ @@ -227590,6 +239088,7 @@ static void sqlite3Fts5BufferAppendBlob( ){ if( nData ){ if( fts5BufferGrow(pRc, pBuf, nData) ) return; + assert( pBuf->p!=0 ); memcpy(&pBuf->p[pBuf->n], pData, nData); pBuf->n += nData; } @@ -227691,6 +239190,7 @@ static int sqlite3Fts5PoslistNext64( i64 *piOff /* IN/OUT: Current offset */ ){ int i = *pi; + assert( a!=0 || i==0 ); if( i>=n ){ /* EOF */ *piOff = -1; @@ -227698,6 +239198,7 @@ static int sqlite3Fts5PoslistNext64( }else{ i64 iOff = *piOff; u32 iVal; + assert( a!=0 ); fts5FastGetVarint32(a, i, iVal); if( iVal<=1 ){ if( iVal==0 ){ @@ -227953,6 +239454,8 @@ static void sqlite3Fts5TermsetFree(Fts5Termset *p){ #define FTS5_DEFAULT_CRISISMERGE 16 #define FTS5_DEFAULT_HASHSIZE (1024*1024) +#define FTS5_DEFAULT_DELETE_AUTOMERGE 10 /* default 10% */ + /* Maximum allowed page size */ #define FTS5_MAX_PAGE_SIZE (64*1024) @@ -228283,6 +239786,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("contentless_delete", zCmd, 
nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessDelete = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... directives"); @@ -228317,6 +239830,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed tokendata=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bTokendata = (zArg[0]=='1'); + } + return rc; + } + *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd); return SQLITE_ERROR; } @@ -228481,6 +240004,7 @@ static int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } + assert( (pRet->abUnindexed && pRet->azCol) || rc!=SQLITE_OK ); for(i=3; rc==SQLITE_OK && ibContentlessDelete + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 requires a contentless table" + ); + rc = SQLITE_ERROR; + } + + /* We only allow contentless_delete=1 if columnsize=0 is not present. + ** + ** This restriction may be removed at some point. + */ + if( rc==SQLITE_OK && pRet->bContentlessDelete && pRet->bColumnsize==0 ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 is incompatible with columnsize=0" + ); + rc = SQLITE_ERROR; + } + /* If a tokenizer= option was successfully parsed, the tokenizer has ** already been allocated. Otherwise, allocate an instance of the default ** tokenizer (unicode61) now. */ @@ -228820,6 +240366,18 @@ static int sqlite3Fts5ConfigSetValue( } } + else if( 0==sqlite3_stricmp(zKey, "deletemerge") ){ + int nVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + nVal = sqlite3_value_int(pVal); + }else{ + *pbBadkey = 1; + } + if( nVal<0 ) nVal = FTS5_DEFAULT_DELETE_AUTOMERGE; + if( nVal>100 ) nVal = 0; + pConfig->nDeleteMerge = nVal; + } + else if( 0==sqlite3_stricmp(zKey, "rank") ){ const char *zIn = (const char*)sqlite3_value_text(pVal); char *zRank; @@ -228834,6 +240392,18 @@ static int sqlite3Fts5ConfigSetValue( rc = SQLITE_OK; *pbBadkey = 1; } + } + + else if( 0==sqlite3_stricmp(zKey, "secure-delete") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bSecureDelete = (bVal ? 
1 : 0); + } }else{ *pbBadkey = 1; } @@ -228856,6 +240426,7 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE; pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE; pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE; + pConfig->nDeleteMerge = FTS5_DEFAULT_DELETE_AUTOMERGE; zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName); if( zSql ){ @@ -228878,15 +240449,20 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ rc = sqlite3_finalize(p); } - if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){ + if( rc==SQLITE_OK + && iVersion!=FTS5_CURRENT_VERSION + && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE + ){ rc = SQLITE_ERROR; if( pConfig->pzErrmsg ){ assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf( - "invalid fts5 file format (found %d, expected %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION + *pConfig->pzErrmsg = sqlite3_mprintf("invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE ); } + }else{ + pConfig->iVersion = iVersion; } if( rc==SQLITE_OK ){ @@ -228914,6 +240490,10 @@ static int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ /* #include "fts5Int.h" */ /* #include "fts5parse.h" */ +#ifndef SQLITE_FTS5_MAX_EXPR_DEPTH +# define SQLITE_FTS5_MAX_EXPR_DEPTH 256 +#endif + /* ** All token types in the generated fts5parse.h file are greater than 0. */ @@ -228954,11 +240534,17 @@ struct Fts5Expr { ** FTS5_NOT (nChild, apChild valid) ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) +** +** iHeight: +** Distance from this node to furthest leaf. This is always 0 for nodes +** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one +** greater than the largest child value. */ struct Fts5ExprNode { int eType; /* Node type */ int bEof; /* True at EOF */ int bNomatch; /* True if entry is not a match */ + int iHeight; /* Distance to tree leaf nodes */ /* Next method for this node. */ int (*xNext)(Fts5Expr*, Fts5ExprNode*, int, i64); @@ -228987,7 +240573,9 @@ struct Fts5ExprNode { struct Fts5ExprTerm { u8 bPrefix; /* True for a prefix term */ u8 bFirst; /* True if token must be first in column */ - char *zTerm; /* nul-terminated term */ + char *pTerm; /* Term data */ + int nQueryTerm; /* Effective size of term in bytes */ + int nFullTerm; /* Size of term in bytes incl. tokendata */ Fts5IndexIter *pIter; /* Iterator for this term */ Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */ }; @@ -229028,6 +240616,31 @@ struct Fts5Parse { int bPhraseToAnd; /* Convert "a+b" to "a AND b" */ }; +/* +** Check that the Fts5ExprNode.iHeight variables are set correctly in +** the expression tree passed as the only argument. 
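+**
+** (For example, in the tree for 'a AND (b OR c)', the three FTS5_STRING
+** leaves have iHeight==0, the OR node iHeight==1, and the AND root
+** iHeight==2.)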
+*/ +#ifndef NDEBUG +static void assert_expr_depth_ok(int rc, Fts5ExprNode *p){ + if( rc==SQLITE_OK ){ + if( p->eType==FTS5_TERM || p->eType==FTS5_STRING || p->eType==0 ){ + assert( p->iHeight==0 ); + }else{ + int ii; + int iMaxChild = 0; + for(ii=0; iinChild; ii++){ + Fts5ExprNode *pChild = p->apChild[ii]; + iMaxChild = MAX(iMaxChild, pChild->iHeight); + assert_expr_depth_ok(SQLITE_OK, pChild); + } + assert( p->iHeight==iMaxChild+1 ); + } + } +} +#else +# define assert_expr_depth_ok(rc, p) +#endif + static void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){ va_list ap; va_start(ap, zFmt); @@ -229142,6 +240755,8 @@ static int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert_expr_depth_ok(sParse.rc, sParse.pExpr); + /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. */ if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ @@ -229186,6 +240801,19 @@ static int sqlite3Fts5ExprNew( return sParse.rc; } +/* +** Assuming that buffer z is at least nByte bytes in size and contains a +** valid utf-8 string, return the number of characters in the string. +*/ +static int fts5ExprCountChar(const char *z, int nByte){ + int nRet = 0; + int ii; + for(ii=0; ii=3 ){ + + if( fts5ExprCountChar(&zText[iFirst], i-iFirst)>=3 ){ int jj; zExpr[iOut++] = '"'; for(jj=iFirst; jjnPhrase + p2->nPhrase; @@ -229315,7 +240944,7 @@ static int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){ } sqlite3_free(p2->apExprPhrase); sqlite3_free(p2); - }else{ + }else if( p2 ){ *pp1 = p2; } @@ -229813,7 +241442,7 @@ static int fts5ExprNearInitAll( p->pIter = 0; } rc = sqlite3Fts5IndexQuery( - pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), + pExpr->pIndex, p->pTerm, p->nQueryTerm, (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) | (pExpr->bDesc ? 
FTS5INDEX_QUERY_DESC : 0), pNear->pColset, @@ -230450,7 +242079,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){ Fts5ExprTerm *pSyn; Fts5ExprTerm *pNext; Fts5ExprTerm *pTerm = &pPhrase->aTerm[i]; - sqlite3_free(pTerm->zTerm); + sqlite3_free(pTerm->pTerm); sqlite3Fts5IterClose(pTerm->pIter); for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){ pNext = pSyn->pSynonym; @@ -230548,6 +242177,7 @@ static Fts5ExprNearset *sqlite3Fts5ParseNearset( typedef struct TokenCtx TokenCtx; struct TokenCtx { Fts5ExprPhrase *pPhrase; + Fts5Config *pConfig; int rc; }; @@ -230581,8 +242211,12 @@ static int fts5ParseTokenize( rc = SQLITE_NOMEM; }else{ memset(pSyn, 0, (size_t)nByte); - pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); - memcpy(pSyn->zTerm, pToken, nToken); + pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); + pSyn->nFullTerm = pSyn->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata ){ + pSyn->nQueryTerm = (int)strlen(pSyn->pTerm); + } + memcpy(pSyn->pTerm, pToken, nToken); pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym; pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn; } @@ -230607,7 +242241,11 @@ static int fts5ParseTokenize( if( rc==SQLITE_OK ){ pTerm = &pPhrase->aTerm[pPhrase->nTerm++]; memset(pTerm, 0, sizeof(Fts5ExprTerm)); - pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->nFullTerm = pTerm->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){ + pTerm->nQueryTerm = (int)strlen(pTerm->pTerm); + } } } @@ -230674,6 +242312,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm( memset(&sCtx, 0, sizeof(TokenCtx)); sCtx.pPhrase = pAppend; + sCtx.pConfig = pConfig; rc = fts5ParseStringFromToken(pToken, &z); if( rc==SQLITE_OK ){ @@ -230721,12 +242360,15 @@ static int sqlite3Fts5ExprClonePhrase( Fts5Expr **ppNew ){ int rc = SQLITE_OK; /* Return code */ - Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */ + Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ - TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */ - - pOrig = pExpr->apExprPhrase[iPhrase]; - pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + rc = SQLITE_RANGE; + }else{ + pOrig = pExpr->apExprPhrase[iPhrase]; + pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + } if( rc==SQLITE_OK ){ pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase*)); @@ -230739,7 +242381,7 @@ static int sqlite3Fts5ExprClonePhrase( pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; @@ -230753,26 +242395,27 @@ static int sqlite3Fts5ExprClonePhrase( } } - if( pOrig->nTerm ){ - int i; /* Used to iterate through phrase terms */ - for(i=0; rc==SQLITE_OK && inTerm; i++){ - int tflags = 0; - Fts5ExprTerm *p; - for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ - const char *zTerm = p->zTerm; - rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm), - 0, 0); - tflags = FTS5_TOKEN_COLOCATED; - } - if( rc==SQLITE_OK ){ - sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; - 
sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + if( rc==SQLITE_OK ){ + if( pOrig->nTerm ){ + int i; /* Used to iterate through phrase terms */ + sCtx.pConfig = pExpr->pConfig; + for(i=0; rc==SQLITE_OK && inTerm; i++){ + int tflags = 0; + Fts5ExprTerm *p; + for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ + rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0); + tflags = FTS5_TOKEN_COLOCATED; + } + if( rc==SQLITE_OK ){ + sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; + sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + } } + }else{ + /* This happens when parsing a token or quoted phrase that contains + ** no token characters at all. (e.g ... MATCH '""'). */ + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } - }else{ - /* This happens when parsing a token or quoted phrase that contains - ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){ @@ -231089,6 +242732,7 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ + int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ int nByte = sizeof(Fts5ExprNode*) * pSub->nChild; memcpy(&p->apChild[p->nChild], pSub->apChild, nByte); @@ -231097,6 +242741,9 @@ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ }else{ p->apChild[p->nChild++] = pSub; } + for( ; iinChild; ii++){ + p->iHeight = MAX(p->iHeight, p->apChild[ii]->iHeight + 1); + } } /* @@ -231127,6 +242774,7 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( if( pRet ){ pRet->eType = FTS5_AND; pRet->nChild = nTerm; + pRet->iHeight = 1; fts5ExprAssignXNext(pRet); pParse->nPhrase--; for(ii=0; iiapPhrase[0]->aTerm[ii]; + Fts5ExprTerm *pTo = &pPhrase->aTerm[0]; pParse->apPhrase[pParse->nPhrase++] = pPhrase; pPhrase->nTerm = 1; - pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup( - &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1 - ); + pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm); + pTo->nQueryTerm = p->nQueryTerm; + pTo->nFullTerm = p->nFullTerm; pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase) ); @@ -231232,6 +242882,14 @@ static Fts5ExprNode *sqlite3Fts5ParseNode( }else{ fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ + sqlite3Fts5ParseError(pParse, + "fts5 expression tree is too large (maximum depth %d)", + SQLITE_FTS5_MAX_EXPR_DEPTH + ); + sqlite3_free(pRet); + pRet = 0; + } } } } @@ -231310,7 +242968,7 @@ static Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( return pRet; } -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ sqlite3_int64 nByte = 0; Fts5ExprTerm *p; @@ -231318,16 +242976,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ /* Determine the maximum amount of space required. 
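  ** (Each term may double in size, since '"' characters are escaped
  ** by doubling them, plus a few bytes for the enclosing quotes and
  ** separators.)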
*/ for(p=pTerm; p; p=p->pSynonym){ - nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2; + nByte += pTerm->nQueryTerm * 2 + 3 + 2; } zQuoted = sqlite3_malloc64(nByte); if( zQuoted ){ int i = 0; for(p=pTerm; p; p=p->pSynonym){ - char *zIn = p->zTerm; + char *zIn = p->pTerm; + char *zEnd = &zIn[p->nQueryTerm]; zQuoted[i++] = '"'; - while( *zIn ){ + while( zInnTerm; iTerm++){ - char *zTerm = pPhrase->aTerm[iTerm].zTerm; - zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm); + Fts5ExprTerm *p = &pPhrase->aTerm[iTerm]; + zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ", + p->nQueryTerm, p->pTerm + ); if( pPhrase->aTerm[iTerm].bPrefix ){ zRet = fts5PrintfAppend(zRet, "*"); } @@ -231416,6 +243077,8 @@ static char *fts5ExprPrintTcl( if( zRet==0 ) return 0; } + }else if( pExpr->eType==0 ){ + zRet = sqlite3_mprintf("{}"); }else{ char const *zOp = 0; int i; @@ -231677,14 +243340,14 @@ static void fts5ExprFold( sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics)); } } -#endif /* ifdef SQLITE_TEST */ +#endif /* if SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called during initialization to register the fts5_expr() scalar ** UDF with the SQLite handle passed as the only argument. */ static int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) struct Fts5ExprFunc { const char *z; void (*x)(sqlite3_context*,int,sqlite3_value**); @@ -231805,6 +243468,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){ return 0; } +/* +** pToken is a buffer nToken bytes in size that may or may not contain +** an embedded 0x00 byte. If it does, return the number of bytes in +** the buffer before the 0x00. If it does not, return nToken. +*/ +static int fts5QueryTerm(const char *pToken, int nToken){ + int ii; + for(ii=0; iipExpr; int i; + int nQuery = nToken; + i64 iRowid = pExpr->pRoot->iRowid; UNUSED_PARAM2(iUnused1, iUnused2); - if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE; + if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE; + if( pExpr->pConfig->bTokendata ){ + nQuery = fts5QueryTerm(pToken, nQuery); + } if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++; for(i=0; inPhrase; i++){ - Fts5ExprTerm *pTerm; + Fts5ExprTerm *pT; if( p->aPopulator[i].bOk==0 ) continue; - for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){ - int nTerm = (int)strlen(pTerm->zTerm); - if( (nTerm==nToken || (nTermbPrefix)) - && memcmp(pTerm->zTerm, pToken, nTerm)==0 + for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){ + if( (pT->nQueryTerm==nQuery || (pT->nQueryTermbPrefix)) + && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0 ){ int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); + if( rc==SQLITE_OK && pExpr->pConfig->bTokendata && !pT->bPrefix ){ + int iCol = p->iOff>>32; + int iTokOff = p->iOff & 0x7FFFFFFF; + rc = sqlite3Fts5IndexIterWriteTokendata( + pT->pIter, pToken, nToken, iRowid, iCol, iTokOff + ); + } if( rc ) return rc; break; } @@ -231967,6 +243652,83 @@ static int sqlite3Fts5ExprPhraseCollist( return rc; } +/* +** Does the work of the fts5_api.xQueryToken() API method. 
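+**
+** A usage sketch at the extension-API level (pApi and pFts5 as passed
+** to any fts5 auxiliary function; error handling omitted):
+**
+**   const char *pTok = 0; int nTok = 0;
+**   int rc = pApi->xQueryToken(pFts5, iPhrase, iToken, &pTok, &nTok);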
+*/ +static int sqlite3Fts5ExprQueryToken( + Fts5Expr *pExpr, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + + *ppOut = pPhrase->aTerm[iToken].pTerm; + *pnOut = pPhrase->aTerm[iToken].nFullTerm; + return SQLITE_OK; +} + +/* +** Does the work of the fts5_api.xInstToken() API method. +*/ +static int sqlite3Fts5ExprInstToken( + Fts5Expr *pExpr, + i64 iRowid, + int iPhrase, + int iCol, + int iOff, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + Fts5ExprTerm *pTerm = 0; + int rc = SQLITE_OK; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + pTerm = &pPhrase->aTerm[iToken]; + if( pTerm->bPrefix==0 ){ + if( pExpr->pConfig->bTokendata ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; + } + } + return rc; +} + +/* +** Clear the token mappings for all Fts5IndexIter objects mannaged by +** the expression passed as the only argument. +*/ +static void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ + int ii; + for(ii=0; iinPhrase; ii++){ + Fts5ExprTerm *pT; + for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){ + sqlite3Fts5IndexIterClearTokendata(pT->pIter); + } + } +} + /* ** 2014 August 11 ** @@ -232005,10 +243767,15 @@ struct Fts5Hash { /* ** Each entry in the hash table is represented by an object of the -** following type. Each object, its key (a nul-terminated string) and -** its current data are stored in a single memory allocation. The -** key immediately follows the object in memory. The position list -** data immediately follows the key data in memory. +** following type. Each object, its key, and its current data are stored +** in a single memory allocation. The key immediately follows the object +** in memory. The position list data immediately follows the key data +** in memory. +** +** The key is Fts5HashEntry.nKey bytes in size. It consists of a single +** byte identifying the index (either the main term index or a prefix-index), +** followed by the term data. For example: "0token". There is no +** nul-terminator - in this case nKey=6. ** ** The data that follows the key is in a similar, but not identical format ** to the doclist data stored in the database. 
It is: @@ -232143,8 +243910,7 @@ static int fts5HashResize(Fts5Hash *pHash){ unsigned int iHash; Fts5HashEntry *p = apOld[i]; apOld[i] = p->pHashNext; - iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), - (int)strlen(fts5EntryKey(p))); + iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey); p->pHashNext = apNew[iHash]; apNew[iHash] = p; } @@ -232228,7 +243994,7 @@ static int sqlite3Fts5HashWrite( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ char *zKey = fts5EntryKey(p); if( zKey[0]==bByte - && p->nKey==nToken + && p->nKey==nToken+1 && memcmp(&zKey[1], pToken, nToken)==0 ){ break; @@ -232258,9 +244024,9 @@ static int sqlite3Fts5HashWrite( zKey[0] = bByte; memcpy(&zKey[1], pToken, nToken); assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) ); - p->nKey = nToken; + p->nKey = nToken+1; zKey[nToken+1] = '\0'; - p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry); + p->nData = nToken+1 + sizeof(Fts5HashEntry); p->pHashNext = pHash->aSlot[iHash]; pHash->aSlot[iHash] = p; pHash->nEntry++; @@ -232377,12 +244143,17 @@ static Fts5HashEntry *fts5HashEntryMerge( *ppOut = p1; p1 = 0; }else{ - int i = 0; char *zKey1 = fts5EntryKey(p1); char *zKey2 = fts5EntryKey(p2); - while( zKey1[i]==zKey2[i] ) i++; + int nMin = MIN(p1->nKey, p2->nKey); - if( ((u8)zKey1[i])>((u8)zKey2[i]) ){ + int cmp = memcmp(zKey1, zKey2, nMin); + if( cmp==0 ){ + cmp = p1->nKey - p2->nKey; + } + assert( cmp!=0 ); + + if( cmp>0 ){ /* p2 is smaller */ *ppOut = p2; ppOut = &p2->pScanNext; @@ -232401,10 +244172,8 @@ static Fts5HashEntry *fts5HashEntryMerge( } /* -** Extract all tokens from hash table iHash and link them into a list -** in sorted order. The hash table is cleared before returning. It is -** the responsibility of the caller to free the elements of the returned -** list. +** Link all tokens from hash table iHash into a list in sorted order. The +** tokens are not removed from the hash table. */ static int fts5HashEntrySort( Fts5Hash *pHash, @@ -232426,7 +244195,7 @@ static int fts5HashEntrySort( Fts5HashEntry *pIter; for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){ if( pTerm==0 - || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) + || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) ){ Fts5HashEntry *pEntry = pIter; pEntry->pScanNext = 0; @@ -232444,7 +244213,6 @@ static int fts5HashEntrySort( pList = fts5HashEntryMerge(pList, ap[i]); } - pHash->nEntry = 0; sqlite3_free(ap); *ppSorted = pList; return SQLITE_OK; @@ -232466,12 +244234,11 @@ static int sqlite3Fts5HashQuery( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ zKey = fts5EntryKey(p); - assert( p->nKey+1==(int)strlen(zKey) ); - if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break; + if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break; } if( p ){ - int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1; + int nHashPre = sizeof(Fts5HashEntry) + nTerm; int nList = p->nData - nHashPre; u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10)); if( pRet ){ @@ -232498,6 +244265,28 @@ static int sqlite3Fts5HashScanInit( return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan); } +#ifdef SQLITE_DEBUG +static int fts5HashCount(Fts5Hash *pHash){ + int nEntry = 0; + int ii; + for(ii=0; iinSlot; ii++){ + Fts5HashEntry *p = 0; + for(p=pHash->aSlot[ii]; p; p=p->pHashNext){ + nEntry++; + } + } + return nEntry; +} +#endif + +/* +** Return true if the hash table is empty, false otherwise. 
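+**
+** (In debug builds the nEntry counter is cross-checked against a full
+** scan of the hash slots - see fts5HashCount() above.)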
+*/ +static int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){ + assert( pHash->nEntry==fts5HashCount(pHash) ); + return pHash->nEntry==0; +} + static void sqlite3Fts5HashScanNext(Fts5Hash *p){ assert( !sqlite3Fts5HashScanEof(p) ); p->pScan = p->pScan->pScanNext; @@ -232510,19 +244299,22 @@ static int sqlite3Fts5HashScanEof(Fts5Hash *p){ static void sqlite3Fts5HashScanEntry( Fts5Hash *pHash, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ){ Fts5HashEntry *p; if( (p = pHash->pScan) ){ char *zKey = fts5EntryKey(p); - int nTerm = (int)strlen(zKey); + int nTerm = p->nKey; fts5HashAddPoslistSize(pHash, p, 0); *pzTerm = zKey; - *ppDoclist = (const u8*)&zKey[nTerm+1]; - *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1); + *pnTerm = nTerm; + *ppDoclist = (const u8*)&zKey[nTerm]; + *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm); }else{ *pzTerm = 0; + *pnTerm = 0; *ppDoclist = 0; *pnDoclist = 0; } @@ -232584,6 +244376,26 @@ static void sqlite3Fts5HashScanEntry( # error "FTS5_MAX_PREFIX_INDEXES is too large" #endif +#define FTS5_MAX_LEVEL 64 + +/* +** There are two versions of the format used for the structure record: +** +** 1. the legacy format, that may be read by all fts5 versions, and +** +** 2. the V2 format, which is used by contentless_delete=1 databases. +** +** Both begin with a 4-byte "configuration cookie" value. Then, a legacy +** format structure record contains a varint - the number of levels in +** the structure. Whereas a V2 structure record contains the constant +** 4 bytes [0xff 0x00 0x00 0x01]. This is unambiguous as the value of a +** varint has to be at least 16256 to begin with "0xFF". And the default +** maximum number of levels is 64. +** +** See below for more on structure record formats. +*/ +#define FTS5_STRUCTURE_V2 "\xFF\x00\x00\x01" + /* ** Details: ** @@ -232591,7 +244403,7 @@ static void sqlite3Fts5HashScanEntry( ** ** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB); ** -** , contains the following 5 types of records. See the comments surrounding +** , contains the following 6 types of records. See the comments surrounding ** the FTS5_*_ROWID macros below for a description of how %_data rowids are ** assigned to each fo them. ** @@ -232600,12 +244412,12 @@ static void sqlite3Fts5HashScanEntry( ** The set of segments that make up an index - the index structure - are ** recorded in a single record within the %_data table. The record consists ** of a single 32-bit configuration cookie value followed by a list of -** SQLite varints. If the FTS table features more than one index (because -** there are one or more prefix indexes), it is guaranteed that all share -** the same cookie value. +** SQLite varints. +** +** If the structure record is a V2 record, the configuration cookie is +** followed by the following 4 bytes: [0xFF 0x00 0x00 0x01]. ** -** Immediately following the configuration cookie, the record begins with -** three varints: +** Next, the record continues with three varints: ** ** + number of levels, ** + total number of segments on all levels, @@ -232620,6 +244432,12 @@ static void sqlite3Fts5HashScanEntry( ** + first leaf page number (often 1, always greater than 0) ** + final leaf page number ** +** Then, for V2 structures only: +** +** + lower origin counter value, +** + upper origin counter value, +** + the number of tombstone hash pages. +** ** 2. 
The Averages Record: ** ** A single record within the %_data table. The data is a list of varints. @@ -232735,6 +244553,38 @@ static void sqlite3Fts5HashScanEntry( ** * A list of delta-encoded varints - the first rowid on each subsequent ** child page. ** +** 6. Tombstone Hash Page +** +** These records are only ever present in contentless_delete=1 tables. +** There are zero or more of these associated with each segment. They +** are used to store the tombstone rowids for rows contained in the +** associated segments. +** +** The set of nHashPg tombstone hash pages associated with a single +** segment together form a single hash table containing tombstone rowids. +** To find the page of the hash on which a key might be stored: +** +** iPg = (rowid % nHashPg) +** +** Then, within page iPg, which has nSlot slots: +** +** iSlot = (rowid / nHashPg) % nSlot +** +** Each tombstone hash page begins with an 8 byte header: +** +** 1-byte: Key-size (the size in bytes of each slot). Either 4 or 8. +** 1-byte: rowid-0-tombstone flag. This flag is only valid on the +** first tombstone hash page for each segment (iPg=0). If set, +** the hash table contains rowid 0. If clear, it does not. +** Rowid 0 is handled specially. +** 2-bytes: unused. +** 4-bytes: Big-endian integer containing number of entries on page. +** +** Following this are nSlot 4 or 8 byte slots (depending on the key-size +** in the first byte of the page header). The number of slots may be +** determined based on the size of the page record and the key-size: +** +** nSlot = (nByte - 8) / key-size */ /* @@ -232768,6 +244618,7 @@ static void sqlite3Fts5HashScanEntry( #define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno) #define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno) +#define FTS5_TOMBSTONE_ROWID(segid,ipg) fts5_dri(segid+(1<<16), 0, 0, ipg) #ifdef SQLITE_DEBUG static int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; } @@ -232794,6 +244645,9 @@ typedef struct Fts5SegWriter Fts5SegWriter; typedef struct Fts5Structure Fts5Structure; typedef struct Fts5StructureLevel Fts5StructureLevel; typedef struct Fts5StructureSegment Fts5StructureSegment; +typedef struct Fts5TokenDataIter Fts5TokenDataIter; +typedef struct Fts5TokenDataMap Fts5TokenDataMap; +typedef struct Fts5TombstoneArray Fts5TombstoneArray; struct Fts5Data { u8 *p; /* Pointer to buffer containing record */ @@ -232803,6 +244657,12 @@ struct Fts5Data { /* ** One object per %_data table. +** +** nContentlessDelete: +** The number of contentless delete operations since the most recent +** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked +** so that extra auto-merge work can be done by fts5IndexFlush() to +** account for the delete operations. */ struct Fts5Index { Fts5Config *pConfig; /* Virtual table configuration */ @@ -232817,9 +244677,12 @@ struct Fts5Index { int nPendingData; /* Current bytes of pending data */ i64 iWriteRowid; /* Rowid for current doc being written */ int bDelete; /* Current write is a delete */ + int nContentlessDelete; /* Number of contentless delete ops */ + int nPendingRow; /* Number of INSERT in hash table */ /* Error state. */ int rc; /* Current error code */ + int flushRc; /* State used by the fts5DataXXX() functions. */ sqlite3_blob *pReader; /* RO incr-blob open on %_data table */ @@ -232828,8 +244691,11 @@ struct Fts5Index { sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */ sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" 
*/ sqlite3_stmt *pIdxSelect; + sqlite3_stmt *pIdxNextSelect; int nRead; /* Total number of blocks read */ + sqlite3_stmt *pDeleteFromIdx; + sqlite3_stmt *pDataVersion; i64 iStructVersion; /* data_version when pStruct read */ Fts5Structure *pStruct; /* Current db structure (or NULL) */ @@ -232849,11 +244715,23 @@ struct Fts5DoclistIter { ** The contents of the "structure" record for each index are represented ** using an Fts5Structure record in memory. Which uses instances of the ** other Fts5StructureXXX types as components. +** +** nOriginCntr: +** This value is set to non-zero for structure records created for +** contentlessdelete=1 tables only. In that case it represents the +** origin value to apply to the next top-level segment created. */ struct Fts5StructureSegment { int iSegid; /* Segment id */ int pgnoFirst; /* First leaf page number in segment */ int pgnoLast; /* Last leaf page number in segment */ + + /* contentlessdelete=1 tables only: */ + u64 iOrigin1; + u64 iOrigin2; + int nPgTombstone; /* Number of tombstone hash table pages */ + u64 nEntryTombstone; /* Number of tombstone entries that "count" */ + u64 nEntry; /* Number of rows in this segment */ }; struct Fts5StructureLevel { int nMerge; /* Number of segments in incr-merge */ @@ -232863,6 +244741,7 @@ struct Fts5StructureLevel { struct Fts5Structure { int nRef; /* Object reference count */ u64 nWriteCounter; /* Total leaves written to level 0 */ + u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */ @@ -232922,9 +244801,6 @@ struct Fts5CResult { ** iLeafOffset: ** Byte offset within the current leaf that is the first byte of the ** position list data (one byte passed the position-list size field). -** rowid field of the current entry. Usually this is the size field of the -** position list data. The exception is if the rowid for the current entry -** is the last thing on the leaf page. ** ** pLeaf: ** Buffer containing current leaf page data. Set to NULL at EOF. @@ -232954,6 +244830,13 @@ struct Fts5CResult { ** ** iTermIdx: ** Index of current term on iTermLeafPgno. +** +** apTombstone/nTombstone: +** These are used for contentless_delete=1 tables only. When the cursor +** is first allocated, the apTombstone[] array is allocated so that it +** is large enough for all tombstones hash pages associated with the +** segment. The pages themselves are loaded lazily from the database as +** they are required. */ struct Fts5SegIter { Fts5StructureSegment *pSeg; /* Segment to iterate through */ @@ -232962,6 +244845,7 @@ struct Fts5SegIter { Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ i64 iLeafOffset; /* Byte offset within current leaf */ + Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */ /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*); @@ -232988,6 +244872,15 @@ struct Fts5SegIter { u8 bDel; /* True if the delete flag is set */ }; +/* +** Array of tombstone pages. Reference counted. +*/ +struct Fts5TombstoneArray { + int nRef; /* Number of pointers to this object */ + int nTombstone; + Fts5Data *apTombstone[1]; /* Array of tombstone pages */ +}; + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. @@ -233032,9 +244925,16 @@ struct Fts5SegIter { ** poslist: ** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered. 
** There is no way to tell if this is populated or not. +** +** pColset: +** If not NULL, points to an object containing a set of column indices. +** Only matches that occur in one of these columns will be returned. +** The Fts5Iter does not own the Fts5Colset object, and so it is not +** freed when the iterator is closed - it is owned by the upper layer. */ struct Fts5Iter { Fts5IndexIter base; /* Base class containing output vars */ + Fts5TokenDataIter *pTokenDataIter; Fts5Index *pIndex; /* Index that owns this iterator */ Fts5Buffer poslist; /* Buffer containing current poslist */ @@ -233052,7 +244952,6 @@ struct Fts5Iter { Fts5SegIter aSeg[1]; /* Array of segment iterators */ }; - /* ** An instance of the following type is used to iterate through the contents ** of a doclist-index record. @@ -233091,6 +244990,60 @@ static u16 fts5GetU16(const u8 *aIn){ return ((u16)aIn[0] << 8) + aIn[1]; } +/* +** The only argument points to a buffer at least 8 bytes in size. This +** function interprets the first 8 bytes of the buffer as a 64-bit big-endian +** unsigned integer and returns the result. +*/ +static u64 fts5GetU64(u8 *a){ + return ((u64)a[0] << 56) + + ((u64)a[1] << 48) + + ((u64)a[2] << 40) + + ((u64)a[3] << 32) + + ((u64)a[4] << 24) + + ((u64)a[5] << 16) + + ((u64)a[6] << 8) + + ((u64)a[7] << 0); +} + +/* +** The only argument points to a buffer at least 4 bytes in size. This +** function interprets the first 4 bytes of the buffer as a 32-bit big-endian +** unsigned integer and returns the result. +*/ +static u32 fts5GetU32(const u8 *a){ + return ((u32)a[0] << 24) + + ((u32)a[1] << 16) + + ((u32)a[2] << 8) + + ((u32)a[3] << 0); +} + +/* +** Write iVal, formated as a 64-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU64(u8 *a, u64 iVal){ + a[0] = ((iVal >> 56) & 0xFF); + a[1] = ((iVal >> 48) & 0xFF); + a[2] = ((iVal >> 40) & 0xFF); + a[3] = ((iVal >> 32) & 0xFF); + a[4] = ((iVal >> 24) & 0xFF); + a[5] = ((iVal >> 16) & 0xFF); + a[6] = ((iVal >> 8) & 0xFF); + a[7] = ((iVal >> 0) & 0xFF); +} + +/* +** Write iVal, formated as a 32-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU32(u8 *a, u32 iVal){ + a[0] = ((iVal >> 24) & 0xFF); + a[1] = ((iVal >> 16) & 0xFF); + a[2] = ((iVal >> 8) & 0xFF); + a[3] = ((iVal >> 0) & 0xFF); +} + /* ** Allocate and return a buffer at least nByte bytes in size. ** @@ -233318,10 +245271,17 @@ static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){ /* ** Remove all records associated with segment iSegid. 
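** This includes any tombstone hash pages associated with the segment
** (see FTS5_TOMBSTONE_ROWID).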
*/ -static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){ +static void fts5DataRemoveSegment(Fts5Index *p, Fts5StructureSegment *pSeg){ + int iSegid = pSeg->iSegid; i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0); i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1; fts5DataDelete(p, iFirst, iLast); + + if( pSeg->nPgTombstone ){ + i64 iTomb1 = FTS5_TOMBSTONE_ROWID(iSegid, 0); + i64 iTomb2 = FTS5_TOMBSTONE_ROWID(iSegid, pSeg->nPgTombstone-1); + fts5DataDelete(p, iTomb1, iTomb2); + } if( p->pIdxDeleter==0 ){ Fts5Config *pConfig = p->pConfig; fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf( @@ -233432,11 +245392,19 @@ static int fts5StructureDecode( int nSegment = 0; sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */ Fts5Structure *pRet = 0; /* Structure object to return */ + int bStructureV2 = 0; /* True for FTS5_STRUCTURE_V2 */ + u64 nOriginCntr = 0; /* Largest origin value seen so far */ /* Grab the cookie value */ if( piCookie ) *piCookie = sqlite3Fts5Get32(pData); i = 4; + /* Check if this is a V2 structure record. Set bStructureV2 if it is. */ + if( 0==memcmp(&pData[i], FTS5_STRUCTURE_V2, 4) ){ + i += 4; + bStructureV2 = 1; + } + /* Read the total number of levels and segments from the start of the ** structure record. */ i += fts5GetVarint32(&pData[i], nLevel); @@ -233483,9 +245451,18 @@ static int fts5StructureDecode( rc = FTS5_CORRUPT; break; } + assert( pSeg!=0 ); i += fts5GetVarint32(&pData[i], pSeg->iSegid); i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst); i += fts5GetVarint32(&pData[i], pSeg->pgnoLast); + if( bStructureV2 ){ + i += fts5GetVarint(&pData[i], &pSeg->iOrigin1); + i += fts5GetVarint(&pData[i], &pSeg->iOrigin2); + i += fts5GetVarint32(&pData[i], pSeg->nPgTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntryTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntry); + nOriginCntr = MAX(nOriginCntr, pSeg->iOrigin2); + } if( pSeg->pgnoLastpgnoFirst ){ rc = FTS5_CORRUPT; break; @@ -233496,6 +245473,9 @@ static int fts5StructureDecode( } } if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT; + if( bStructureV2 ){ + pRet->nOriginCntr = nOriginCntr+1; + } if( rc!=SQLITE_OK ){ fts5StructureRelease(pRet); @@ -233513,6 +245493,7 @@ static int fts5StructureDecode( */ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ fts5StructureMakeWritable(pRc, ppStruct); + assert( (ppStruct!=0 && (*ppStruct)!=0) || (*pRc)!=SQLITE_OK ); if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; @@ -233707,6 +245688,7 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ Fts5Buffer buf; /* Buffer to serialize record into */ int iLvl; /* Used to iterate through levels */ int iCookie; /* Cookie value to store */ + int nHdr = (pStruct->nOriginCntr>0 ? 
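fts5StructureDecode() above now recognises a second on-disk format: if the four bytes after the cookie match the FTS5_STRUCTURE_V2 magic, five extra varints follow each segment's pgno fields, and the decoder tracks the largest iOrigin2 seen so the origin counter can resume at (max + 1). A hedged sketch of just that per-segment tail; readVarint() is a plain LEB128 reader used only to keep the sketch self-contained (SQLite's actual varint format differs):

```c
#include <stdint.h>

/* Simplified LEB128 stand-in for the real varint reader. */
static int readVarint(const uint8_t *p, uint64_t *pVal){
  uint64_t v = 0; int i = 0;
  do{ v |= (uint64_t)(p[i] & 0x7f) << (7*i); }while( p[i++] & 0x80 );
  *pVal = v;
  return i;
}

typedef struct SegV2 {
  uint64_t iOrigin1, iOrigin2;   /* first/last origin merged into segment */
  uint64_t nPgTombstone;         /* tombstone hash table pages */
  uint64_t nEntryTombstone;      /* tombstone entries that "count" */
  uint64_t nEntry;               /* rows in this segment */
} SegV2;

/* Decode one segment's V2 tail, tracking the largest origin seen so the
** caller can set nOriginCntr to (max origin + 1), as the real
** fts5StructureDecode() does. Returns the number of bytes consumed. */
static int decodeSegV2(const uint8_t *p, SegV2 *pSeg, uint64_t *pMaxOrigin){
  int i = 0;
  i += readVarint(&p[i], &pSeg->iOrigin1);
  i += readVarint(&p[i], &pSeg->iOrigin2);
  i += readVarint(&p[i], &pSeg->nPgTombstone);
  i += readVarint(&p[i], &pSeg->nEntryTombstone);
  i += readVarint(&p[i], &pSeg->nEntry);
  if( pSeg->iOrigin2 > *pMaxOrigin ) *pMaxOrigin = pSeg->iOrigin2;
  return i;
}
```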
(4+4+9+9+9) : (4+9+9)); assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); memset(&buf, 0, sizeof(Fts5Buffer)); @@ -233715,9 +245697,12 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ iCookie = p->pConfig->iCookie; if( iCookie<0 ) iCookie = 0; - if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){ + if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, nHdr) ){ sqlite3Fts5Put32(buf.p, iCookie); buf.n = 4; + if( pStruct->nOriginCntr>0 ){ + fts5BufferSafeAppendBlob(&buf, FTS5_STRUCTURE_V2, 4); + } fts5BufferSafeAppendVarint(&buf, pStruct->nLevel); fts5BufferSafeAppendVarint(&buf, pStruct->nSegment); fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter); @@ -233731,9 +245716,17 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ assert( pLvl->nMerge<=pLvl->nSeg ); for(iSeg=0; iSegnSeg; iSeg++){ - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast); + Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iSegid); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoFirst); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoLast); + if( pStruct->nOriginCntr>0 ){ + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin1); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin2); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nPgTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntryTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntry); + } } } @@ -233876,9 +245869,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){ } if( iOffnn ){ - i64 iVal; + u64 iVal; pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1; - iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal); + iOff += fts5GetVarint(&pData->p[iOff], &iVal); pLvl->iRowid += iVal; pLvl->iOff = iOff; }else{ @@ -233971,42 +245964,25 @@ static int fts5DlidxLvlPrev(Fts5DlidxLvl *pLvl){ pLvl->bEof = 1; }else{ u8 *a = pLvl->pData->p; - i64 iVal; - int iLimit; - int ii; - int nZero = 0; - - /* Currently iOff points to the first byte of a varint. This block - ** decrements iOff until it points to the first byte of the previous - ** varint. Taking care not to read any memory locations that occur - ** before the buffer in memory. */ - iLimit = (iOff>9 ? iOff-9 : 0); - for(iOff--; iOff>iLimit; iOff--){ - if( (a[iOff-1] & 0x80)==0 ) break; - } - - fts5GetVarint(&a[iOff], (u64*)&iVal); - pLvl->iRowid -= iVal; - pLvl->iLeafPgno--; - - /* Skip backwards past any 0x00 varints. */ - for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){ - nZero++; - } - if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){ - /* The byte immediately before the last 0x00 byte has the 0x80 bit - ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80 - ** bytes before a[ii]. 
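The new nHdr computation in fts5StructureWrite() reserves 4 bytes for the cookie, 4 for the FTS5_STRUCTURE_V2 magic, and a worst-case 9 bytes for each of the three leading varints (nLevel, nSegment, nWriteCounter), since an SQLite varint never exceeds 9 bytes. A small sketch of that bound; varintLen() is illustrative, not an fts5 routine:

```c
#include <assert.h>
#include <stdint.h>

/* Length in bytes of an SQLite-style varint: 7 payload bits per byte for
** the first 8 bytes, with a 9th byte carrying a full 8 bits if needed. */
static int varintLen(uint64_t v){
  int n = 1;
  while( v > 0x7f && n < 9 ){ v >>= 7; n++; }
  return n;
}

int main(void){
  assert( varintLen(0)==1 );
  assert( varintLen(0xffffffffffffffffULL)==9 );  /* worst case */
  /* So a V2 header always fits in 4 (cookie) + 4 (magic) + 3*9 bytes. */
  assert( 4+4+9+9+9 == 35 );
  return 0;
}
```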
*/ - int bZero = 0; /* True if last 0x00 counts */ - if( (ii-8)>=pLvl->iFirstOff ){ - int j; - for(j=1; j<=8 && (a[ii-j] & 0x80); j++); - bZero = (j>8); + + pLvl->iOff = 0; + fts5DlidxLvlNext(pLvl); + while( 1 ){ + int nZero = 0; + int ii = pLvl->iOff; + u64 delta = 0; + + while( a[ii]==0 ){ + nZero++; + ii++; } - if( bZero==0 ) nZero--; + ii += sqlite3Fts5GetVarint(&a[ii], &delta); + + if( ii>=iOff ) break; + pLvl->iLeafPgno += nZero+1; + pLvl->iRowid += delta; + pLvl->iOff = ii; } - pLvl->iLeafPgno -= nZero; - pLvl->iOff = iOff - nZero; } return pLvl->bEof; @@ -234202,7 +246178,7 @@ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ i64 iOff = pIter->iLeafOffset; ASSERT_SZLEAF_OK(pIter->pLeaf); - if( iOff>=pIter->pLeaf->szLeaf ){ + while( iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( pIter->pLeaf==0 ){ if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; @@ -234273,6 +246249,25 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ } } +/* +** Allocate a tombstone hash page array object (pIter->pTombArray) for +** the iterator passed as the second argument. If an OOM error occurs, +** leave an error in the Fts5Index object. +*/ +static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ + const int nTomb = pIter->pSeg->nPgTombstone; + if( nTomb>0 ){ + int nByte = nTomb * sizeof(Fts5Data*) + sizeof(Fts5TombstoneArray); + Fts5TombstoneArray *pNew; + pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( pNew ){ + pNew->nTombstone = nTomb; + pNew->nRef = 1; + pIter->pTombArray = pNew; + } + } +} + /* ** Initialize the iterator object pIter to iterate through the entries in ** segment pSeg. The iterator is left pointing to the first entry when @@ -234301,10 +246296,12 @@ static void fts5SegIterInit( fts5SegIterSetNext(p, pIter); pIter->pSeg = pSeg; pIter->iLeafPgno = pSeg->pgnoFirst-1; - fts5SegIterNextPage(p, pIter); + do { + fts5SegIterNextPage(p, pIter); + }while( p->rc==SQLITE_OK && pIter->pLeaf && pIter->pLeaf->nn==4 ); } - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && pIter->pLeaf ){ pIter->iLeafOffset = 4; assert( pIter->pLeaf!=0 ); assert_nc( pIter->pLeaf->nn>4 ); @@ -234312,6 +246309,7 @@ static void fts5SegIterInit( pIter->iPgidxOff = pIter->pLeaf->szLeaf+1; fts5SegIterLoadTerm(p, pIter, 0); fts5SegIterLoadNPos(p, pIter); + fts5SegIterAllocTombstone(p, pIter); } } @@ -234498,7 +246496,7 @@ static void fts5SegIterNext_None( iOff = pIter->iLeafOffset; /* Next entry is on the next page */ - if( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ + while( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( p->rc || pIter->pLeaf==0 ) return; pIter->iRowid = 0; @@ -234522,15 +246520,16 @@ static void fts5SegIterNext_None( }else{ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList; sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); if( pList==0 ) goto next_none_eof; pIter->pLeaf->p = (u8*)pList; pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList; - sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); } @@ -234596,11 +246595,12 @@ static void fts5SegIterNext( }else if( pIter->pSeg==0 ){ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList = 0; assert( 
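The rewritten fts5DlidxLvlPrev() above drops the delicate backwards varint decoding in favour of resetting to the start of the page and scanning forward until it reaches the entry just before the current offset. A simplified sketch of the same idea for a generic forward-only varint stream; it uses LEB128 stand-in varints and omits the real code's handling of 0x00 bytes as page-number gaps:

```c
#include <stdint.h>

/* Simplified LEB128 stand-in for the real varint reader. */
static int readVarint(const uint8_t *p, uint64_t *pVal){
  uint64_t v = 0; int i = 0;
  do{ v |= (uint64_t)(p[i] & 0x7f) << (7*i); }while( p[i++] & 0x80 );
  *pVal = v;
  return i;
}

/* Return the offset of the entry immediately before offset iCur by
** rescanning the well-formed varint stream from iFirst. This trades a
** little CPU for never having to decode a varint backwards. */
static int prevEntryOffset(const uint8_t *a, int iFirst, int iCur){
  int iOff = iFirst;
  int iPrev = iFirst;
  while( iOff < iCur ){
    uint64_t dummy;
    iPrev = iOff;
    iOff += readVarint(&a[iOff], &dummy);
  }
  return iPrev;
}
```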
(pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm ); if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){ sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); } if( pList==0 ){ fts5DataRelease(pIter->pLeaf); @@ -234610,8 +246610,7 @@ static void fts5SegIterNext( pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList+1; - sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm), - (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); *pbNewTerm = 1; } @@ -234691,7 +246690,7 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ Fts5Data *pLast = 0; int pgnoLast = 0; - if( pDlidx ){ + if( pDlidx && p->pConfig->iVersion==FTS5_CURRENT_VERSION ){ int iSegid = pIter->pSeg->iSegid; pgnoLast = fts5DlidxIterPgno(pDlidx); pLast = fts5LeafRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast)); @@ -234997,7 +246996,7 @@ static void fts5SegIterSeekInit( fts5LeafSeek(p, bGe, pIter, pTerm, nTerm); } - if( p->rc==SQLITE_OK && bGe==0 ){ + if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){ pIter->flags |= FTS5_SEGITER_ONETERM; if( pIter->pLeaf ){ if( flags & FTS5INDEX_QUERY_DESC ){ @@ -235013,6 +247012,9 @@ static void fts5SegIterSeekInit( } fts5SegIterSetNext(p, pIter); + if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){ + fts5SegIterAllocTombstone(p, pIter); + } /* Either: ** @@ -235029,6 +247031,79 @@ static void fts5SegIterSeekInit( ); } + +/* +** SQL used by fts5SegIterNextInit() to find the page to open. +*/ +static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){ + if( p->pIdxNextSelect==0 ){ + Fts5Config *pConfig = p->pConfig; + fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf( + "SELECT pgno FROM '%q'.'%q_idx' WHERE " + "segid=? AND term>? ORDER BY term ASC LIMIT 1", + pConfig->zDb, pConfig->zName + )); + + } + return p->pIdxNextSelect; +} + +/* +** This is similar to fts5SegIterSeekInit(), except that it initializes +** the segment iterator to point to the first term following the page +** with pToken/nToken on it. 
+*/ +static void fts5SegIterNextInit( + Fts5Index *p, + const char *pTerm, int nTerm, + Fts5StructureSegment *pSeg, /* Description of segment */ + Fts5SegIter *pIter /* Object to populate */ +){ + int iPg = -1; /* Page of segment to open */ + int bDlidx = 0; + sqlite3_stmt *pSel = 0; /* SELECT to find iPg */ + + pSel = fts5IdxNextStmt(p); + if( pSel ){ + assert( p->rc==SQLITE_OK ); + sqlite3_bind_int(pSel, 1, pSeg->iSegid); + sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC); + + if( sqlite3_step(pSel)==SQLITE_ROW ){ + i64 val = sqlite3_column_int64(pSel, 0); + iPg = (int)(val>>1); + bDlidx = (val & 0x0001); + } + p->rc = sqlite3_reset(pSel); + sqlite3_bind_null(pSel, 2); + if( p->rc ) return; + } + + memset(pIter, 0, sizeof(*pIter)); + pIter->pSeg = pSeg; + pIter->flags |= FTS5_SEGITER_ONETERM; + if( iPg>=0 ){ + pIter->iLeafPgno = iPg - 1; + fts5SegIterNextPage(p, pIter); + fts5SegIterSetNext(p, pIter); + } + if( pIter->pLeaf ){ + const u8 *a = pIter->pLeaf->p; + int iTermOff = 0; + + pIter->iPgidxOff = pIter->pLeaf->szLeaf; + pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff); + pIter->iLeafOffset = iTermOff; + fts5SegIterLoadTerm(p, pIter, 0); + fts5SegIterLoadNPos(p, pIter); + if( bDlidx ) fts5SegIterLoadDlidx(p, pIter); + + assert( p->rc!=SQLITE_OK || + fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0 + ); + } +} + /* ** Initialize the object pIter to point to term pTerm/nTerm within the ** in-memory hash table. If there is no such term in the hash-table, the @@ -235055,14 +247130,21 @@ static void fts5SegIterHashInit( const u8 *pList = 0; p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm); - sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList); - n = (z ? (int)strlen((const char*)z) : 0); + sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList); if( pList ){ pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data)); if( pLeaf ){ pLeaf->p = (u8*)pList; } } + + /* The call to sqlite3Fts5HashScanInit() causes the hash table to + ** fill the size field of all existing position lists. This means they + ** can no longer be appended to. Since the only scenario in which they + ** can be appended to is if the previous operation on this table was + ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this + ** possibility altogether. */ + p->bDelete = 0; }else{ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data), (const char*)pTerm, nTerm, (void**)&pLeaf, &nList @@ -235093,6 +247175,37 @@ static void fts5SegIterHashInit( fts5SegIterSetNext(p, pIter); } +/* +** Array ap[] contains n elements. Release each of these elements using +** fts5DataRelease(). Then free the array itself using sqlite3_free(). +*/ +static void fts5IndexFreeArray(Fts5Data **ap, int n){ + if( ap ){ + int ii; + for(ii=0; iinRef--; + if( p->nRef<=0 ){ + int ii; + for(ii=0; iinTombstone; ii++){ + fts5DataRelease(p->apTombstone[ii]); + } + sqlite3_free(p); + } + } +} + /* ** Zero the iterator passed as the only argument. 
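fts5SegIterNextInit() above decodes the value read from the %_idx table as two packed fields: the low bit says whether the term has a doclist-index, and the remaining bits are the leaf page number. Hypothetical pack/unpack helpers showing the convention, which is also why a later hunk's DELETE statement compares against (pgno/2):

```c
#include <assert.h>
#include <stdint.h>

/* The %_idx pgno column stores (pgno*2 + bDlidx): low bit flags the
** presence of a doclist-index, the rest is the leaf page number. */
static int64_t idxPack(int pgno, int bDlidx){
  return ((int64_t)pgno << 1) | (bDlidx!=0);
}
static int idxPgno(int64_t v){ return (int)(v >> 1); }
static int idxDlidx(int64_t v){ return (int)(v & 0x0001); }

int main(void){
  int64_t v = idxPack(37, 1);
  assert( idxPgno(v)==37 && idxDlidx(v)==1 );
  return 0;
}
```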
*/ @@ -235100,6 +247213,7 @@ static void fts5SegIterClear(Fts5SegIter *pIter){ fts5BufferFree(&pIter->term); fts5DataRelease(pIter->pLeaf); fts5DataRelease(pIter->pNextLeaf); + fts5TombstoneArrayDelete(pIter->pTombArray); fts5DlidxIterFree(pIter->pDlidx); sqlite3_free(pIter->aRowidOffset); memset(pIter, 0, sizeof(Fts5SegIter)); @@ -235233,7 +247347,6 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){ assert_nc( i2!=0 ); pRes->bTermEq = 1; if( p1->iRowid==p2->iRowid ){ - p1->bDel = p2->bDel; return i2; } res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1; @@ -235252,7 +247365,8 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){ /* ** Move the seg-iter so that it points to the first rowid on page iLeafPgno. -** It is an error if leaf iLeafPgno does not exist or contains no rowids. +** It is an error if leaf iLeafPgno does not exist. Unless the db is +** a 'secure-delete' db, if it contains no rowids then this is also an error. */ static void fts5SegIterGotoPage( Fts5Index *p, /* FTS5 backend object */ @@ -235267,21 +247381,23 @@ static void fts5SegIterGotoPage( fts5DataRelease(pIter->pNextLeaf); pIter->pNextLeaf = 0; pIter->iLeafPgno = iLeafPgno-1; - fts5SegIterNextPage(p, pIter); - assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno ); - if( p->rc==SQLITE_OK && ALWAYS(pIter->pLeaf!=0) ){ + while( p->rc==SQLITE_OK ){ int iOff; - u8 *a = pIter->pLeaf->p; - int n = pIter->pLeaf->szLeaf; - + fts5SegIterNextPage(p, pIter); + if( pIter->pLeaf==0 ) break; iOff = fts5LeafFirstRowidOff(pIter->pLeaf); - if( iOff<4 || iOff>=n ){ - p->rc = FTS5_CORRUPT; - }else{ - iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; - fts5SegIterLoadNPos(p, pIter); + if( iOff>0 ){ + u8 *a = pIter->pLeaf->p; + int n = pIter->pLeaf->szLeaf; + if( iOff<4 || iOff>=n ){ + p->rc = FTS5_CORRUPT; + }else{ + iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; + fts5SegIterLoadNPos(p, pIter); + } + break; } } } @@ -235342,7 +247458,6 @@ static void fts5SegIterNextFrom( }while( p->rc==SQLITE_OK ); } - /* ** Free the iterator object passed as the second argument. */ @@ -235434,6 +247549,85 @@ static void fts5MultiIterSetEof(Fts5Iter *pIter){ pIter->iSwitchRowid = pSeg->iRowid; } +/* +** The argument to this macro must be an Fts5Data structure containing a +** tombstone hash page. This macro returns the key-size of the hash-page. +*/ +#define TOMBSTONE_KEYSIZE(pPg) (pPg->p[0]==4 ? 4 : 8) + +#define TOMBSTONE_NSLOT(pPg) \ + ((pPg->nn > 16) ? ((pPg->nn-8) / TOMBSTONE_KEYSIZE(pPg)) : 1) + +/* +** Query a single tombstone hash table for rowid iRowid. Return true if +** it is found or false otherwise. The tombstone hash table is one of +** nHashTable tables. 
+*/ +static int fts5IndexTombstoneQuery( + Fts5Data *pHash, /* Hash table page to query */ + int nHashTable, /* Number of pages attached to segment */ + u64 iRowid /* Rowid to query hash for */ +){ + const int szKey = TOMBSTONE_KEYSIZE(pHash); + const int nSlot = TOMBSTONE_NSLOT(pHash); + int iSlot = (iRowid / nHashTable) % nSlot; + int nCollide = nSlot; + + if( iRowid==0 ){ + return pHash->p[1]; + }else if( szKey==4 ){ + u32 *aSlot = (u32*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU32((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + }else{ + u64 *aSlot = (u64*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU64((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + } + + return 0; +} + +/* +** Return true if the iterator passed as the only argument points +** to an segment entry for which there is a tombstone. Return false +** if there is no tombstone or if the iterator is already at EOF. +*/ +static int fts5MultiIterIsDeleted(Fts5Iter *pIter){ + int iFirst = pIter->aFirst[1].iFirst; + Fts5SegIter *pSeg = &pIter->aSeg[iFirst]; + Fts5TombstoneArray *pArray = pSeg->pTombArray; + + if( pSeg->pLeaf && pArray ){ + /* Figure out which page the rowid might be present on. */ + int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone; + assert( iPg>=0 ); + + /* If tombstone hash page iPg has not yet been loaded from the + ** database, load it now. */ + if( pArray->apTombstone[iPg]==0 ){ + pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex, + FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg) + ); + if( pArray->apTombstone[iPg]==0 ) return 0; + } + + return fts5IndexTombstoneQuery( + pArray->apTombstone[iPg], + pArray->nTombstone, + pSeg->iRowid + ); + } + + return 0; +} + /* ** Move the iterator to the next entry. ** @@ -235471,7 +247665,9 @@ static void fts5MultiIterNext( fts5AssertMultiIterSetup(p, pIter); assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf ); - if( pIter->bSkipEmpty==0 || pSeg->nPos ){ + if( (pIter->bSkipEmpty==0 || pSeg->nPos) + && 0==fts5MultiIterIsDeleted(pIter) + ){ pIter->xSetOutputs(pIter, pSeg); return; } @@ -235503,7 +247699,9 @@ static void fts5MultiIterNext2( } fts5AssertMultiIterSetup(p, pIter); - }while( fts5MultiIterIsEmpty(p, pIter) ); + }while( (fts5MultiIterIsEmpty(p, pIter) || fts5MultiIterIsDeleted(pIter)) + && (p->rc==SQLITE_OK) + ); } } @@ -235516,7 +247714,7 @@ static Fts5Iter *fts5MultiIterAlloc( int nSeg ){ Fts5Iter *pNew; - int nSlot; /* Power of two >= nSeg */ + i64 nSlot; /* Power of two >= nSeg */ for(nSlot=2; nSlotnSeg-1; iIter>0; iIter--){ + int iEq; + if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){ + Fts5SegIter *pSeg = &pIter->aSeg[iEq]; + if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); + fts5MultiIterAdvanced(p, pIter, iEq, iIter); + } + } + fts5MultiIterSetEof(pIter); + fts5AssertMultiIterSetup(p, pIter); + + if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter)) + || fts5MultiIterIsDeleted(pIter) + ){ + fts5MultiIterNext(p, pIter, 0, 0); + }else if( pIter->base.bEof==0 ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + pIter->xSetOutputs(pIter, pSeg); + } +} /* ** Allocate a new Fts5Iter object. @@ -235996,7 +248220,7 @@ static void fts5MultiIterNew( if( iLevel<0 ){ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); nSeg = pStruct->nSegment; - nSeg += (p->pHash ? 
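TOMBSTONE_KEYSIZE/TOMBSTONE_NSLOT and fts5IndexTombstoneQuery() above imply a simple page format: byte 0 holds the key size (4 or 8), byte 1 flags whether rowid 0 is present, and an open-addressed slot array starts at byte 8. fts5MultiIterIsDeleted() first selects one of the segment's pages by (iRowid % nTombstone), then linear probing runs within that page. A self-contained sketch of the probe over a single page of 64-bit keys; tombstoneLookup() and its fixed layout are illustrative assumptions, not the fts5 page format verbatim:

```c
#include <stdint.h>

/* Big-endian read, as on the real tombstone pages. */
static uint64_t getU64(const uint8_t *a){
  uint64_t v = 0;
  for(int i=0; i<8; i++) v = (v<<8) | a[i];
  return v;
}

/* Probe one tombstone page: an open-addressed array of big-endian rowids
** starting at byte 8, with 0 meaning "empty slot" (rowid 0 itself is
** handled out-of-band by a flag byte, as in the real code). The division
** by nHashTable spreads rowids within the page, since the page itself was
** already chosen by (iRowid % nHashTable). */
static int tombstoneLookup(
  const uint8_t *aPg,   /* page image */
  int nSlot,            /* number of 8-byte slots on this page */
  int nHashTable,       /* pages attached to the segment */
  uint64_t iRowid       /* non-zero rowid to probe for */
){
  const uint8_t *aSlot = &aPg[8];
  int iSlot = (int)((iRowid / nHashTable) % nSlot);
  int nProbe = nSlot;                 /* give up after one full pass */
  while( getU64(&aSlot[8*iSlot])!=0 ){
    if( getU64(&aSlot[8*iSlot])==iRowid ) return 1;
    if( nProbe--==0 ) break;
    iSlot = (iSlot+1) % nSlot;
  }
  return 0;
}
```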
1 : 0); + nSeg += (p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH)); }else{ nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment); } @@ -236017,7 +248241,7 @@ static void fts5MultiIterNew( if( p->rc==SQLITE_OK ){ if( iLevel<0 ){ Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel]; - if( p->pHash ){ + if( p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH) ){ /* Add a segment iterator for the current contents of the hash table. */ Fts5SegIter *pIter = &pNew->aSeg[iIter++]; fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter); @@ -236042,29 +248266,12 @@ static void fts5MultiIterNew( assert( iIter==nSeg ); } - /* If the above was successful, each component iterators now points + /* If the above was successful, each component iterator now points ** to the first entry in its segment. In this case initialize the ** aFirst[] array. Or, if an error has occurred, free the iterator ** object and set the output variable to NULL. */ if( p->rc==SQLITE_OK ){ - for(iIter=pNew->nSeg-1; iIter>0; iIter--){ - int iEq; - if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){ - Fts5SegIter *pSeg = &pNew->aSeg[iEq]; - if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); - fts5MultiIterAdvanced(p, pNew, iEq, iIter); - } - } - fts5MultiIterSetEof(pNew); - fts5AssertMultiIterSetup(p, pNew); - - if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){ - fts5MultiIterNext(p, pNew, 0, 0); - }else if( pNew->base.bEof==0 ){ - Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst]; - pNew->xSetOutputs(pNew, pSeg); - } - + fts5MultiIterFinishSetup(p, pNew); }else{ fts5MultiIterFree(pNew); *ppOut = 0; @@ -236089,7 +248296,6 @@ static void fts5MultiIterNew2( pNew = fts5MultiIterAlloc(p, 2); if( pNew ){ Fts5SegIter *pIter = &pNew->aSeg[1]; - pIter->flags = FTS5_SEGITER_ONETERM; if( pData->szLeaf>0 ){ pIter->pLeaf = pData; @@ -236236,7 +248442,10 @@ static void fts5IndexDiscardData(Fts5Index *p){ if( p->pHash ){ sqlite3Fts5HashClear(p->pHash); p->nPendingData = 0; + p->nPendingRow = 0; + p->flushRc = SQLITE_OK; } + p->nContentlessDelete = 0; } /* @@ -236450,7 +248659,7 @@ static void fts5WriteDlidxAppend( } if( pDlidx->bPrevValid ){ - iVal = iRowid - pDlidx->iPrev; + iVal = (u64)iRowid - (u64)pDlidx->iPrev; }else{ i64 iPgno = (i==0 ? 
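This and several nearby hunks (fts5WriteDlidxAppend(), fts5WriteAppendRowid(), later the fts5MergeAppendDocid() macro) recast rowid subtraction through u64. Signed i64 overflow is undefined behaviour in C, while unsigned arithmetic wraps modulo 2^64, so computing a delta as (u64)iRowid - (u64)iPrev is well-defined for any pair of rowids and still round-trips when added back. A minimal sketch of the property:

```c
#include <assert.h>
#include <stdint.h>

int main(void){
  /* Deltas between arbitrary 64-bit rowids can overflow a signed i64,
  ** which is undefined behaviour. Unsigned wraparound is defined and
  ** invertible: encode the delta as u64, add it back mod 2^64. */
  int64_t iPrev  = -3;                      /* e.g. a negative rowid */
  int64_t iRowid = INT64_MAX;
  uint64_t iDelta = (uint64_t)iRowid - (uint64_t)iPrev;
  int64_t iDecoded = (int64_t)((uint64_t)iPrev + iDelta);
  assert( iDecoded==iRowid );               /* round-trip holds */
  return 0;
}
```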
pWriter->writer.pgno : pDlidx[-1].pgno); assert( pDlidx->buf.n==0 ); @@ -236617,7 +248826,9 @@ static void fts5WriteAppendRowid( fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid); }else{ assert_nc( p->rc || iRowid>pWriter->iPrevRowid ); - fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid - pWriter->iPrevRowid); + fts5BufferAppendVarint(&p->rc, &pPage->buf, + (u64)iRowid - (u64)pWriter->iPrevRowid + ); } pWriter->iPrevRowid = iRowid; pWriter->bFirstRowidInDoclist = 0; @@ -236635,7 +248846,7 @@ static void fts5WriteAppendPoslistData( const u8 *a = aData; int n = nData; - assert( p->pConfig->pgsz>0 ); + assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK ); while( p->rc==SQLITE_OK && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz ){ @@ -236770,7 +248981,7 @@ static void fts5TrimSegments(Fts5Index *p, Fts5Iter *pIter){ fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr); fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n); fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p); - fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff,&pData->p[iOff]); + fts5BufferAppendBlob(&p->rc, &buf,pData->szLeaf-iOff,&pData->p[iOff]); if( p->rc==SQLITE_OK ){ /* Set the szLeaf field */ fts5PutU16(&buf.p[2], (u16)buf.n); @@ -236871,6 +249082,12 @@ static void fts5IndexMergeLevel( /* Read input from all segments in the input level */ nInput = pLvl->nSeg; + + /* Set the range of origins that will go into the output segment. */ + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pLvl->aSeg[0].iOrigin1; + pSeg->iOrigin2 = pLvl->aSeg[pLvl->nSeg-1].iOrigin2; + } } bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2); @@ -236930,8 +249147,11 @@ static void fts5IndexMergeLevel( int i; /* Remove the redundant segments from the %_data table */ + assert( pSeg->nEntry==0 ); for(i=0; iaSeg[i].iSegid); + Fts5StructureSegment *pOld = &pLvl->aSeg[i]; + pSeg->nEntry += (pOld->nEntry - pOld->nEntryTombstone); + fts5DataRemoveSegment(p, pOld); } /* Remove the redundant segments from the input level */ @@ -236957,6 +249177,43 @@ static void fts5IndexMergeLevel( if( pnRem ) *pnRem -= writer.nLeafWritten; } +/* +** If this is not a contentless_delete=1 table, or if the 'deletemerge' +** configuration option is set to 0, then this function always returns -1. +** Otherwise, it searches the structure object passed as the second argument +** for a level suitable for merging due to having a large number of +** tombstones in the tombstone hash. If one is found, its index is returned. +** Otherwise, if there is no suitable level, -1. +*/ +static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ + Fts5Config *pConfig = p->pConfig; + int iRet = -1; + if( pConfig->bContentlessDelete && pConfig->nDeleteMerge>0 ){ + int ii; + int nBest = 0; + + for(ii=0; iinLevel; ii++){ + Fts5StructureLevel *pLvl = &pStruct->aLevel[ii]; + i64 nEntry = 0; + i64 nTomb = 0; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + nEntry += pLvl->aSeg[iSeg].nEntry; + nTomb += pLvl->aSeg[iSeg].nEntryTombstone; + } + assert_nc( nEntry>0 || pLvl->nSeg==0 ); + if( nEntry>0 ){ + int nPercent = (nTomb * 100) / nEntry; + if( nPercent>=pConfig->nDeleteMerge && nPercent>nBest ){ + iRet = ii; + nBest = nPercent; + } + } + } + } + return iRet; +} + /* ** Do up to nPg pages of automerge work on the index. ** @@ -236976,14 +249233,15 @@ static int fts5IndexMerge( int iBestLvl = 0; /* Level offering the most input segments */ int nBest = 0; /* Number of input segments on best level */ - /* Set iBestLvl to the level to read input segments from. 
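fts5IndexFindDeleteMerge() above sums entries and tombstones per level, then selects the level whose tombstone percentage both meets the 'deletemerge' threshold and is the largest seen. The selection rule as a standalone sketch; Level and pickDeleteMergeLevel() are illustrative names, not fts5's:

```c
#include <stdint.h>

typedef struct Level {
  int64_t nEntry;            /* entries in all segments on this level */
  int64_t nEntryTombstone;   /* tombstoned entries on this level */
} Level;

/* Return the index of the level with the highest tombstone percentage at
** or above nDeleteMerge, or -1 if no level qualifies. Mirrors the rule in
** fts5IndexFindDeleteMerge(). */
static int pickDeleteMergeLevel(const Level *aLvl, int nLvl, int nDeleteMerge){
  int iRet = -1;
  int nBest = 0;
  for(int ii=0; ii<nLvl; ii++){
    if( aLvl[ii].nEntry>0 ){
      int nPercent = (int)((aLvl[ii].nEntryTombstone * 100) / aLvl[ii].nEntry);
      if( nPercent>=nDeleteMerge && nPercent>nBest ){
        iRet = ii;
        nBest = nPercent;
      }
    }
  }
  return iRet;
}
```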
*/ + /* Set iBestLvl to the level to read input segments from. Or to -1 if + ** there is no level suitable to merge segments from. */ assert( pStruct->nLevel>0 ); for(iLvl=0; iLvlnLevel; iLvl++){ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl]; if( pLvl->nMerge ){ if( pLvl->nMerge>nBest ){ iBestLvl = iLvl; - nBest = pLvl->nMerge; + nBest = nMin; } break; } @@ -236992,22 +249250,18 @@ static int fts5IndexMerge( iBestLvl = iLvl; } } - - /* If nBest is still 0, then the index must be empty. */ -#ifdef SQLITE_DEBUG - for(iLvl=0; nBest==0 && iLvlnLevel; iLvl++){ - assert( pStruct->aLevel[iLvl].nSeg==0 ); + if( nBestaLevel[iBestLvl].nMerge==0 ){ - break; - } + if( iBestLvl<0 ) break; bRet = 1; fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem); if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){ fts5StructurePromote(p, iBestLvl+1, pStruct); } + + if( nMin==1 ) nMin = 2; } *ppStruct = pStruct; return bRet; @@ -237048,16 +249302,16 @@ static void fts5IndexCrisismerge( ){ const int nCrisis = p->pConfig->nCrisisMerge; Fts5Structure *pStruct = *ppStruct; - int iLvl = 0; - - assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 ); - while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ - fts5IndexMergeLevel(p, &pStruct, iLvl, 0); - assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); - fts5StructurePromote(p, iLvl+1, pStruct); - iLvl++; + if( pStruct && pStruct->nLevel>0 ){ + int iLvl = 0; + while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ + fts5IndexMergeLevel(p, &pStruct, iLvl, 0); + assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); + fts5StructurePromote(p, iLvl+1, pStruct); + iLvl++; + } + *ppStruct = pStruct; } - *ppStruct = pStruct; } static int fts5IndexReturn(Fts5Index *p){ @@ -237091,6 +249345,469 @@ static int fts5PoslistPrefix(const u8 *aBuf, int nMax){ return ret; } +/* +** Execute the SQL statement: +** +** DELETE FROM %_idx WHERE (segid, (pgno/2)) = ($iSegid, $iPgno); +** +** This is used when a secure-delete operation removes the last term +** from a segment leaf page. In that case the %_idx entry is removed +** too. This is done to ensure that if all instances of a token are +** removed from an fts5 database in secure-delete mode, no trace of +** the token itself remains in the database. +*/ +static void fts5SecureDeleteIdxEntry( + Fts5Index *p, /* FTS5 backend object */ + int iSegid, /* Id of segment to delete entry for */ + int iPgno /* Page number within segment */ +){ + if( iPgno!=1 ){ + assert( p->pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE ); + if( p->pDeleteFromIdx==0 ){ + fts5IndexPrepareStmt(p, &p->pDeleteFromIdx, sqlite3_mprintf( + "DELETE FROM '%q'.'%q_idx' WHERE (segid, (pgno/2)) = (?1, ?2)", + p->pConfig->zDb, p->pConfig->zName + )); + } + if( p->rc==SQLITE_OK ){ + sqlite3_bind_int(p->pDeleteFromIdx, 1, iSegid); + sqlite3_bind_int(p->pDeleteFromIdx, 2, iPgno); + sqlite3_step(p->pDeleteFromIdx); + p->rc = sqlite3_reset(p->pDeleteFromIdx); + } + } +} + +/* +** This is called when a secure-delete operation removes a position-list +** that overflows onto segment page iPgno of segment pSeg. This function +** rewrites node iPgno, and possibly one or more of its right-hand peers, +** to remove this portion of the position list. +** +** Output variable (*pbLastInDoclist) is set to true if the position-list +** removed is followed by a new term or the end-of-segment, or false if +** it is followed by another rowid/position list. 
+*/ +static void fts5SecureDeleteOverflow( + Fts5Index *p, + Fts5StructureSegment *pSeg, + int iPgno, + int *pbLastInDoclist +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int pgno; + Fts5Data *pLeaf = 0; + assert( iPgno!=1 ); + + *pbLastInDoclist = 1; + for(pgno=iPgno; p->rc==SQLITE_OK && pgno<=pSeg->pgnoLast; pgno++){ + i64 iRowid = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno); + int iNext = 0; + u8 *aPg = 0; + + pLeaf = fts5DataRead(p, iRowid); + if( pLeaf==0 ) break; + aPg = pLeaf->p; + + iNext = fts5GetU16(&aPg[0]); + if( iNext!=0 ){ + *pbLastInDoclist = 0; + } + if( iNext==0 && pLeaf->szLeaf!=pLeaf->nn ){ + fts5GetVarint32(&aPg[pLeaf->szLeaf], iNext); + } + + if( iNext==0 ){ + /* The page contains no terms or rowids. Replace it with an empty + ** page and move on to the right-hand peer. */ + const u8 aEmpty[] = {0x00, 0x00, 0x00, 0x04}; + assert_nc( bDetailNone==0 || pLeaf->nn==4 ); + if( bDetailNone==0 ) fts5DataWrite(p, iRowid, aEmpty, sizeof(aEmpty)); + fts5DataRelease(pLeaf); + pLeaf = 0; + }else if( bDetailNone ){ + break; + }else if( iNext>=pLeaf->szLeaf || pLeaf->nnszLeaf || iNext<4 ){ + p->rc = FTS5_CORRUPT; + break; + }else{ + int nShift = iNext - 4; + int nPg; + + int nIdx = 0; + u8 *aIdx = 0; + + /* Unless the current page footer is 0 bytes in size (in which case + ** the new page footer will be as well), allocate and populate a + ** buffer containing the new page footer. Set stack variables aIdx + ** and nIdx accordingly. */ + if( pLeaf->nn>pLeaf->szLeaf ){ + int iFirst = 0; + int i1 = pLeaf->szLeaf; + int i2 = 0; + + i1 += fts5GetVarint32(&aPg[i1], iFirst); + if( iFirstrc = FTS5_CORRUPT; + break; + } + aIdx = sqlite3Fts5MallocZero(&p->rc, (pLeaf->nn-pLeaf->szLeaf)+2); + if( aIdx==0 ) break; + i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift); + if( i1nn ){ + memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1); + i2 += (pLeaf->nn-i1); + } + nIdx = i2; + } + + /* Modify the contents of buffer aPg[]. Set nPg to the new size + ** in bytes. The new page is always smaller than the old. */ + nPg = pLeaf->szLeaf - nShift; + memmove(&aPg[4], &aPg[4+nShift], nPg-4); + fts5PutU16(&aPg[2], nPg); + if( fts5GetU16(&aPg[0]) ) fts5PutU16(&aPg[0], 4); + if( nIdx>0 ){ + memcpy(&aPg[nPg], aIdx, nIdx); + nPg += nIdx; + } + sqlite3_free(aIdx); + + /* Write the new page to disk and exit the loop */ + assert( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, iRowid, aPg, nPg); + break; + } + } + fts5DataRelease(pLeaf); +} + +/* +** Completely remove the entry that pSeg currently points to from +** the database. +*/ +static void fts5DoSecureDelete( + Fts5Index *p, + Fts5SegIter *pSeg +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int iSegid = pSeg->pSeg->iSegid; + u8 *aPg = pSeg->pLeaf->p; + int nPg = pSeg->pLeaf->nn; + int iPgIdx = pSeg->pLeaf->szLeaf; + + u64 iDelta = 0; + int iNextOff = 0; + int iOff = 0; + int nIdx = 0; + u8 *aIdx = 0; + int bLastInDoclist = 0; + int iIdx = 0; + int iStart = 0; + int iDelKeyOff = 0; /* Offset of deleted key, if any */ + + nIdx = nPg-iPgIdx; + aIdx = sqlite3Fts5MallocZero(&p->rc, nIdx+16); + if( p->rc ) return; + memcpy(aIdx, &aPg[iPgIdx], nIdx); + + /* At this point segment iterator pSeg points to the entry + ** this function should remove from the b-tree segment. + ** + ** In detail=full or detail=column mode, pSeg->iLeafOffset is the + ** offset of the first byte in the position-list for the entry to + ** remove. 
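fts5SecureDeleteOverflow() leans on the leaf-page header format used throughout this file: bytes 0-1 are a big-endian u16 giving the offset of the first rowid on the page (0 if none), bytes 2-3 give szLeaf, and the page-index footer occupies the bytes from szLeaf to nn. Hence the replacement "empty page" literal {0x00,0x00,0x00,0x04}: no first rowid, and a 4-byte szLeaf covering only the header. A small sketch of reading that header; leafIsEmpty() is an illustrative helper, not fts5 code:

```c
#include <assert.h>
#include <stdint.h>

static unsigned getU16(const uint8_t *a){ return ((unsigned)a[0]<<8) | a[1]; }

/* A leaf with no first-rowid offset and szLeaf==4 carries no terms or
** rowids: exactly the {0x00,0x00,0x00,0x04} image that the secure-delete
** path writes over emptied pages. */
static int leafIsEmpty(const uint8_t *aPg, int nPg){
  return nPg==4 && getU16(&aPg[0])==0 && getU16(&aPg[2])==4;
}

int main(void){
  const uint8_t aEmpty[] = {0x00, 0x00, 0x00, 0x04};
  assert( leafIsEmpty(aEmpty, (int)sizeof(aEmpty)) );
  return 0;
}
```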
Immediately before this comes two varints that will also + ** need to be removed: + ** + ** + the rowid or delta rowid value for the entry, and + ** + the size of the position list in bytes. + ** + ** Or, in detail=none mode, there is a single varint prior to + ** pSeg->iLeafOffset - the rowid or delta rowid value. + ** + ** This block sets the following variables: + ** + ** iStart: + ** The offset of the first byte of the rowid or delta-rowid + ** value for the doclist entry being removed. + ** + ** iDelta: + ** The value of the rowid or delta-rowid value for the doclist + ** entry being removed. + ** + ** iNextOff: + ** The offset of the next entry following the position list + ** for the one being removed. If the position list for this + ** entry overflows onto the next leaf page, this value will be + ** greater than pLeaf->szLeaf. + */ + { + int iSOP; /* Start-Of-Position-list */ + if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){ + iStart = pSeg->iTermLeafOffset; + }else{ + iStart = fts5GetU16(&aPg[0]); + } + + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + assert_nc( iSOP<=pSeg->iLeafOffset ); + + if( bDetailNone ){ + while( iSOPiLeafOffset ){ + if( aPg[iSOP]==0x00 ) iSOP++; + if( aPg[iSOP]==0x00 ) iSOP++; + iStart = iSOP; + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + } + + iNextOff = iSOP; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + + }else{ + int nPos = 0; + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + while( iSOPiLeafOffset ){ + iStart = iSOP + (nPos/2); + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + } + assert_nc( iSOP==pSeg->iLeafOffset ); + iNextOff = pSeg->iLeafOffset + pSeg->nPos; + } + } + + iOff = iStart; + + /* If the position-list for the entry being removed flows over past + ** the end of this page, delete the portion of the position-list on the + ** next page and beyond. + ** + ** Set variable bLastInDoclist to true if this entry happens + ** to be the last rowid in the doclist for its term. */ + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + } + + if( pSeg->bDel==0 ){ + if( iNextOff!=iPgIdx ){ + /* Loop through the page-footer. If iNextOff (offset of the + ** entry following the one we are removing) is equal to the + ** offset of a key on this page, then the entry is the last + ** in its doclist. */ + int iKeyOff = 0; + for(iIdx=0; iIdxbDel ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta); + aPg[iOff++] = 0x01; + }else if( bLastInDoclist==0 ){ + if( iNextOff!=iPgIdx ){ + u64 iNextDelta = 0; + iNextOff += fts5GetVarint(&aPg[iNextOff], &iNextDelta); + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta + iNextDelta); + } + }else if( + pSeg->iLeafPgno==pSeg->iTermLeafPgno + && iStart==pSeg->iTermLeafOffset + ){ + /* The entry being removed was the only position list in its + ** doclist. Therefore the term needs to be removed as well. */ + int iKey = 0; + int iKeyOff = 0; + + /* Set iKeyOff to the offset of the term that will be removed - the + ** last offset in the footer that is not greater than iStart. */ + for(iIdx=0; iIdx(u32)iStart ) break; + iKeyOff += iVal; + } + assert_nc( iKey>=1 ); + + /* Set iDelKeyOff to the value of the footer entry to remove from + ** the page. 
*/ + iDelKeyOff = iOff = iKeyOff; + + if( iNextOff!=iPgIdx ){ + /* This is the only position-list associated with the term, and there + ** is another term following it on this page. So the subsequent term + ** needs to be moved to replace the term associated with the entry + ** being removed. */ + int nPrefix = 0; + int nSuffix = 0; + int nPrefix2 = 0; + int nSuffix2 = 0; + + iDelKeyOff = iNextOff; + iNextOff += fts5GetVarint32(&aPg[iNextOff], nPrefix2); + iNextOff += fts5GetVarint32(&aPg[iNextOff], nSuffix2); + + if( iKey!=1 ){ + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nPrefix); + } + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nSuffix); + + nPrefix = MIN(nPrefix, nPrefix2); + nSuffix = (nPrefix2 + nSuffix2) - nPrefix; + + if( (iKeyOff+nSuffix)>iPgIdx || (iNextOff+nSuffix2)>iPgIdx ){ + p->rc = FTS5_CORRUPT; + }else{ + if( iKey!=1 ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix); + } + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix); + if( nPrefix2>pSeg->term.n ){ + p->rc = FTS5_CORRUPT; + }else if( nPrefix2>nPrefix ){ + memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix); + iOff += (nPrefix2-nPrefix); + } + memmove(&aPg[iOff], &aPg[iNextOff], nSuffix2); + iOff += nSuffix2; + iNextOff += nSuffix2; + } + } + }else if( iStart==4 ){ + int iPgno; + + assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno ); + /* The entry being removed may be the only position list in + ** its doclist. */ + for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){ + Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno)); + int bEmpty = (pPg && pPg->nn==4); + fts5DataRelease(pPg); + if( bEmpty==0 ) break; + } + + if( iPgno==pSeg->iTermLeafPgno ){ + i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno); + Fts5Data *pTerm = fts5DataRead(p, iId); + if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){ + u8 *aTermIdx = &pTerm->p[pTerm->szLeaf]; + int nTermIdx = pTerm->nn - pTerm->szLeaf; + int iTermIdx = 0; + int iTermOff = 0; + + while( 1 ){ + u32 iVal = 0; + int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal); + iTermOff += iVal; + if( (iTermIdx+nByte)>=nTermIdx ) break; + iTermIdx += nByte; + } + nTermIdx = iTermIdx; + + memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx); + fts5PutU16(&pTerm->p[2], iTermOff); + + fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx); + if( nTermIdx==0 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno); + } + } + fts5DataRelease(pTerm); + } + } + + /* Assuming no error has occurred, this block does final edits to the + ** leaf page before writing it back to disk. Input variables are: + ** + ** nPg: Total initial size of leaf page. + ** iPgIdx: Initial offset of page footer. + ** + ** iOff: Offset to move data to + ** iNextOff: Offset to move data from + */ + if( p->rc==SQLITE_OK ){ + const int nMove = nPg - iNextOff; /* Number of bytes to move */ + int nShift = iNextOff - iOff; /* Distance to move them */ + + int iPrevKeyOut = 0; + int iKeyIn = 0; + + memmove(&aPg[iOff], &aPg[iNextOff], nMove); + iPgIdx -= nShift; + nPg = iPgIdx; + fts5PutU16(&aPg[2], iPgIdx); + + for(iIdx=0; iIdxiOff ? 
nShift : 0)); + nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOut - iPrevKeyOut); + iPrevKeyOut = iKeyOut; + } + } + + if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno); + } + + assert_nc( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg, nPg); + } + sqlite3_free(aIdx); +} + +/* +** This is called as part of flushing a delete to disk in 'secure-delete' +** mode. It edits the segments within the database described by argument +** pStruct to remove the entries for term zTerm, rowid iRowid. +*/ +static void fts5FlushSecureDelete( + Fts5Index *p, + Fts5Structure *pStruct, + const char *zTerm, + int nTerm, + i64 iRowid +){ + const int f = FTS5INDEX_QUERY_SKIPHASH; + Fts5Iter *pIter = 0; /* Used to find term instance */ + + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); + if( fts5MultiIterEof(p, pIter)==0 ){ + i64 iThis = fts5MultiIterRowid(pIter); + if( iThisrc==SQLITE_OK + && fts5MultiIterEof(p, pIter)==0 + && iRowid==fts5MultiIterRowid(pIter) + ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + fts5DoSecureDelete(p, pSeg); + } + } + + fts5MultiIterFree(pIter); +} + + /* ** Flush the contents of in-memory hash table iHash to a new level-0 ** segment on disk. Also update the corresponding structure record. @@ -237107,143 +249824,197 @@ static void fts5FlushOneHash(Fts5Index *p){ /* Obtain a reference to the index structure and allocate a new segment-id ** for the new level-0 segment. */ pStruct = fts5StructureRead(p); - iSegid = fts5AllocateSegid(p, pStruct); fts5StructureInvalidate(p); - if( iSegid ){ - const int pgsz = p->pConfig->pgsz; - int eDetail = p->pConfig->eDetail; - Fts5StructureSegment *pSeg; /* New segment within pStruct */ - Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ - Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ + if( sqlite3Fts5HashIsEmpty(pHash)==0 ){ + iSegid = fts5AllocateSegid(p, pStruct); + if( iSegid ){ + const int pgsz = p->pConfig->pgsz; + int eDetail = p->pConfig->eDetail; + int bSecureDelete = p->pConfig->bSecureDelete; + Fts5StructureSegment *pSeg; /* New segment within pStruct */ + Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ + Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ + + Fts5SegWriter writer; + fts5WriteInit(p, &writer, iSegid); + + pBuf = &writer.writer.buf; + pPgidx = &writer.writer.pgidx; + + /* fts5WriteInit() should have initialized the buffers to (most likely) + ** the maximum space required. */ + assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + + /* Begin scanning through hash table entries. This loop runs once for each + ** term/doclist currently stored within the hash table. */ + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); + } + while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ + const char *zTerm; /* Buffer containing term */ + int nTerm; /* Size of zTerm in bytes */ + const u8 *pDoclist; /* Pointer to doclist for this term */ + int nDoclist; /* Size of doclist in bytes */ - Fts5SegWriter writer; - fts5WriteInit(p, &writer, iSegid); + /* Get the term and doclist for this entry. 
*/ + sqlite3Fts5HashScanEntry(pHash, &zTerm, &nTerm, &pDoclist, &nDoclist); + if( bSecureDelete==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + if( p->rc!=SQLITE_OK ) break; + assert( writer.bFirstRowidInPage==0 ); + } - pBuf = &writer.writer.buf; - pPgidx = &writer.writer.pgidx; + if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ + /* The entire doclist will fit on the current leaf. */ + fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); + }else{ + int bTermWritten = !bSecureDelete; + i64 iRowid = 0; + i64 iPrev = 0; + int iOff = 0; + + /* The entire doclist will not fit on this leaf. The following + ** loop iterates through the poslists that make up the current + ** doclist. */ + while( p->rc==SQLITE_OK && iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ + iOff++; + continue; + } + } + } - /* fts5WriteInit() should have initialized the buffers to (most likely) - ** the maximum space required. */ - assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); - assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + if( p->rc==SQLITE_OK && bTermWritten==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + bTermWritten = 1; + assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 ); + } - /* Begin scanning through hash table entries. This loop runs once for each - ** term/doclist currently stored within the hash table. */ - if( p->rc==SQLITE_OK ){ - p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); - } - while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ - const char *zTerm; /* Buffer containing term */ - const u8 *pDoclist; /* Pointer to doclist for this term */ - int nDoclist; /* Size of doclist in bytes */ - - /* Write the term for this entry to disk. */ - sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist); - fts5WriteAppendTerm(p, &writer, (int)strlen(zTerm), (const u8*)zTerm); - if( p->rc!=SQLITE_OK ) break; - - assert( writer.bFirstRowidInPage==0 ); - if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ - /* The entire doclist will fit on the current leaf. */ - fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); - }else{ - i64 iRowid = 0; - u64 iDelta = 0; - int iOff = 0; - - /* The entire doclist will not fit on this leaf. The following - ** loop iterates through the poslists that make up the current - ** doclist. 
*/ - while( p->rc==SQLITE_OK && iOffp[0], (u16)pBuf->n); /* first rowid on page */ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); - writer.bFirstRowidInPage = 0; - fts5WriteDlidxAppend(p, &writer, iRowid); + if( writer.bFirstRowidInPage ){ + fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */ + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); + writer.bFirstRowidInPage = 0; + fts5WriteDlidxAppend(p, &writer, iRowid); + }else{ + u64 iRowidDelta = (u64)iRowid - (u64)iPrev; + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowidDelta); + } if( p->rc!=SQLITE_OK ) break; - }else{ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta); - } - assert( pBuf->n<=pBuf->nSpace ); + assert( pBuf->n<=pBuf->nSpace ); + iPrev = iRowid; - if( eDetail==FTS5_DETAIL_NONE ){ - if( iOffp[pBuf->n++] = 0; - iOff++; + if( eDetail==FTS5_DETAIL_NONE ){ if( iOffp[pBuf->n++] = 0; iOff++; + if( iOffp[pBuf->n++] = 0; + iOff++; + } + } + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); } - } - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); - } - }else{ - int bDummy; - int nPos; - int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy); - nCopy += nPos; - if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ - /* The entire poslist will fit on the current leaf. So copy - ** it in one go. */ - fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); }else{ - /* The entire poslist will not fit on this leaf. So it needs - ** to be broken into sections. The only qualification being - ** that each varint must be stored contiguously. */ - const u8 *pPoslist = &pDoclist[iOff]; - int iPos = 0; - while( p->rc==SQLITE_OK ){ - int nSpace = pgsz - pBuf->n - pPgidx->n; - int n = 0; - if( (nCopy - iPos)<=nSpace ){ - n = nCopy - iPos; - }else{ - n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); - } - assert( n>0 ); - fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); - iPos += n; - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); + int bDel = 0; + int nPos = 0; + int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDel); + if( bDel && bSecureDelete ){ + fts5BufferAppendVarint(&p->rc, pBuf, nPos*2); + iOff += nCopy; + nCopy = nPos; + }else{ + nCopy += nPos; + } + if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ + /* The entire poslist will fit on the current leaf. So copy + ** it in one go. */ + fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); + }else{ + /* The entire poslist will not fit on this leaf. So it needs + ** to be broken into sections. The only qualification being + ** that each varint must be stored contiguously. */ + const u8 *pPoslist = &pDoclist[iOff]; + int iPos = 0; + while( p->rc==SQLITE_OK ){ + int nSpace = pgsz - pBuf->n - pPgidx->n; + int n = 0; + if( (nCopy - iPos)<=nSpace ){ + n = nCopy - iPos; + }else{ + n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); + } + assert( n>0 ); + fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); + iPos += n; + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); + } + if( iPos>=nCopy ) break; } - if( iPos>=nCopy ) break; } + iOff += nCopy; } - iOff += nCopy; } } - } - /* TODO2: Doclist terminator written here. */ - /* pBuf->p[pBuf->n++] = '\0'; */ - assert( pBuf->n<=pBuf->nSpace ); - if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); - } - sqlite3Fts5HashClear(pHash); - fts5WriteFinish(p, &writer, &pgnoLast); + /* TODO2: Doclist terminator written here. 
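When a position list is too large for the current leaf, the loop above slices it with fts5PoslistPrefix(), which returns the longest prefix that fits in the remaining space while never splitting an individual varint across pages. A simplified sketch of that computation over LEB128-style varints; varintPrefix() is a stand-in, and SQLite's real varint format differs:

```c
#include <stdint.h>

/* Return the size in bytes of the longest prefix of the well-formed
** varint sequence aBuf[0..nLen) that is no larger than nMax, without
** splitting any varint. Each LEB128 varint ends at the first byte with
** the 0x80 bit clear. The caller must ensure nMax is at least as large
** as the first varint, so the result is always positive. */
static int varintPrefix(const uint8_t *aBuf, int nLen, int nMax){
  int i = 0;
  while( i<nLen ){
    int iEnd = i;
    while( aBuf[iEnd] & 0x80 ) iEnd++;   /* last byte of this varint */
    iEnd++;
    if( iEnd>nMax ) break;               /* next varint would not fit */
    i = iEnd;
  }
  return i;
}
```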
*/ + /* pBuf->p[pBuf->n++] = '\0'; */ + assert( pBuf->n<=pBuf->nSpace ); + if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); + } + fts5WriteFinish(p, &writer, &pgnoLast); - /* Update the Fts5Structure. It is written back to the database by the - ** fts5StructureRelease() call below. */ - if( pStruct->nLevel==0 ){ - fts5StructureAddLevel(&p->rc, &pStruct); - } - fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); - if( p->rc==SQLITE_OK ){ - pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; - pSeg->iSegid = iSegid; - pSeg->pgnoFirst = 1; - pSeg->pgnoLast = pgnoLast; - pStruct->nSegment++; + assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 ); + if( pgnoLast>0 ){ + /* Update the Fts5Structure. It is written back to the database by the + ** fts5StructureRelease() call below. */ + if( pStruct->nLevel==0 ){ + fts5StructureAddLevel(&p->rc, &pStruct); + } + fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); + if( p->rc==SQLITE_OK ){ + pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; + pSeg->iSegid = iSegid; + pSeg->pgnoFirst = 1; + pSeg->pgnoLast = pgnoLast; + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pStruct->nOriginCntr; + pSeg->iOrigin2 = pStruct->nOriginCntr; + pSeg->nEntry = p->nPendingRow; + pStruct->nOriginCntr++; + } + pStruct->nSegment++; + } + fts5StructurePromote(p, 0, pStruct); + } } - fts5StructurePromote(p, 0, pStruct); } - fts5IndexAutomerge(p, &pStruct, pgnoLast); + fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete); fts5IndexCrisismerge(p, &pStruct); fts5StructureWrite(p, pStruct); fts5StructureRelease(pStruct); @@ -237254,10 +250025,21 @@ static void fts5FlushOneHash(Fts5Index *p){ */ static void fts5IndexFlush(Fts5Index *p){ /* Unless it is empty, flush the hash table to disk */ - if( p->nPendingData ){ + if( p->flushRc ){ + p->rc = p->flushRc; + return; + } + if( p->nPendingData || p->nContentlessDelete ){ assert( p->pHash ); - p->nPendingData = 0; fts5FlushOneHash(p); + if( p->rc==SQLITE_OK ){ + sqlite3Fts5HashClear(p->pHash); + p->nPendingData = 0; + p->nPendingRow = 0; + p->nContentlessDelete = 0; + }else if( p->nPendingData || p->nContentlessDelete ){ + p->flushRc = p->rc; + } } } @@ -237273,17 +250055,22 @@ static Fts5Structure *fts5IndexOptimizeStruct( /* Figure out if this structure requires optimization. A structure does ** not require optimization if either: ** - ** + it consists of fewer than two segments, or - ** + all segments are on the same level, or - ** + all segments except one are currently inputs to a merge operation. + ** 1. it consists of fewer than two segments, or + ** 2. all segments are on the same level, or + ** 3. all segments except one are currently inputs to a merge operation. ** - ** In the first case, return NULL. In the second, increment the ref-count - ** on *pStruct and return a copy of the pointer to it. + ** In the first case, if there are no tombstone hash pages, return NULL. In + ** the second, increment the ref-count on *pStruct and return a copy of the + ** pointer to it. 
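fts5IndexFlush() now latches a failed flush: if fts5FlushOneHash() fails while the hash still holds pending data, the error is stored in Fts5Index.flushRc and every later flush attempt returns it immediately, until fts5IndexDiscardData() clears both the pending data and the sticky code. That prevents a retry from half-applying the same buffered writes twice. A sketch of the pattern in isolation; the Flusher type is illustrative:

```c
#define SQLITE_OK 0

typedef struct Flusher {
  int flushRc;        /* sticky error from an earlier failed flush */
  int nPending;       /* amount of unflushed data */
} Flusher;

/* Attempt a flush. On failure with data still pending, remember the error
** so that a retry cannot re-run the same partially applied writes. */
static int flusherFlush(Flusher *p, int (*xFlushOne)(Flusher*)){
  if( p->flushRc ) return p->flushRc;    /* refuse until data discarded */
  if( p->nPending ){
    int rc = xFlushOne(p);
    if( rc==SQLITE_OK ){
      p->nPending = 0;
    }else if( p->nPending ){
      p->flushRc = rc;                   /* latch the failure */
    }
    return rc;
  }
  return SQLITE_OK;
}

/* Discarding pending data clears the latch, as fts5IndexDiscardData()
** does in the real code. */
static void flusherDiscard(Flusher *p){
  p->nPending = 0;
  p->flushRc = SQLITE_OK;
}
```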
*/ - if( nSeg<2 ) return 0; + if( nSeg==0 ) return 0; for(i=0; inLevel; i++){ int nThis = pStruct->aLevel[i].nSeg; - if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){ + int nMerge = pStruct->aLevel[i].nMerge; + if( nThis>0 && (nThis==nSeg || (nThis==nSeg-1 && nMerge==nThis)) ){ + if( nSeg==1 && nThis==1 && pStruct->aLevel[i].aSeg[0].nPgTombstone==0 ){ + return 0; + } fts5StructureRef(pStruct); return pStruct; } @@ -237296,10 +250083,11 @@ static Fts5Structure *fts5IndexOptimizeStruct( if( pNew ){ Fts5StructureLevel *pLvl; nByte = nSeg * sizeof(Fts5StructureSegment); - pNew->nLevel = pStruct->nLevel+1; + pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL); pNew->nRef = 1; pNew->nWriteCounter = pStruct->nWriteCounter; - pLvl = &pNew->aLevel[pStruct->nLevel]; + pNew->nOriginCntr = pStruct->nOriginCntr; + pLvl = &pNew->aLevel[pNew->nLevel-1]; pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pLvl->aSeg ){ int iLvl, iSeg; @@ -237329,7 +250117,9 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); + assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 ); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || pStruct!=0 ); fts5StructureInvalidate(p); if( pStruct ){ @@ -237358,7 +250148,10 @@ static int sqlite3Fts5IndexOptimize(Fts5Index *p){ ** INSERT command. */ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ - Fts5Structure *pStruct = fts5StructureRead(p); + Fts5Structure *pStruct = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); if( pStruct ){ int nMin = p->pConfig->nUsermerge; fts5StructureInvalidate(p); @@ -237366,7 +250159,7 @@ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct); fts5StructureRelease(pStruct); pStruct = pNew; - nMin = 2; + nMin = 1; nMerge = nMerge*-1; } if( pStruct && pStruct->nLevel ){ @@ -237381,7 +250174,7 @@ static int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ static void fts5AppendRowid( Fts5Index *p, - i64 iDelta, + u64 iDelta, Fts5Iter *pUnused, Fts5Buffer *pBuf ){ @@ -237391,7 +250184,7 @@ static void fts5AppendRowid( static void fts5AppendPoslist( Fts5Index *p, - i64 iDelta, + u64 iDelta, Fts5Iter *pMulti, Fts5Buffer *pBuf ){ @@ -237466,10 +250259,10 @@ static void fts5MergeAppendDocid( } #endif -#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \ - assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \ - fts5BufferSafeAppendVarint((pBuf), (iRowid) - (iLastRowid)); \ - (iLastRowid) = (iRowid); \ +#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \ + assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \ + fts5BufferSafeAppendVarint((pBuf), (u64)(iRowid) - (u64)(iLastRowid)); \ + (iLastRowid) = (iRowid); \ } /* @@ -237601,7 +250394,7 @@ static void fts5MergePrefixLists( /* Initialize a doclist-iterator for each input buffer. Arrange them in ** a linked-list starting at pHead in ascending order of rowid. Avoid ** linking any iterators already at EOF into the linked list at all. 
*/ - assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) ); + assert( nBuf+1<=(int)(sizeof(aMerger)/sizeof(aMerger[0])) ); memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1)); pHead = &aMerger[nBuf]; fts5DoclistIterInit(p1, &pHead->iter); @@ -237732,7 +250525,7 @@ static void fts5SetupPrefixIter( u8 *pToken, /* Buffer containing prefix to match */ int nToken, /* Size of buffer pToken in bytes */ Fts5Colset *pColset, /* Restrict matches to these columns */ - Fts5Iter **ppIter /* OUT: New iterator */ + Fts5Iter **ppIter /* OUT: New iterator */ ){ Fts5Structure *pStruct; Fts5Buffer *aBuf; @@ -237740,7 +250533,7 @@ static void fts5SetupPrefixIter( int nMerge = 1; void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); - void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*); + void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ xMerge = fts5MergeRowidLists; xAppend = fts5AppendRowid; @@ -237753,8 +250546,9 @@ static void fts5SetupPrefixIter( aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || (aBuf && pStruct) ); - if( aBuf && pStruct ){ + if( p->rc==SQLITE_OK ){ const int flags = FTS5INDEX_QUERY_SCAN | FTS5INDEX_QUERY_SKIPEMPTY | FTS5INDEX_QUERY_NOOUTPUT; @@ -237766,6 +250560,12 @@ static void fts5SetupPrefixIter( int bNewTerm = 1; memset(&doclist, 0, sizeof(doclist)); + + /* If iIdx is non-zero, then it is the number of a prefix-index for + ** prefixes 1 character longer than the prefix being queried for. That + ** index contains all the doclists required, except for the one + ** corresponding to the prefix itself. That one is extracted from the + ** main term index here. */ if( iIdx!=0 ){ int dummy = 0; const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; @@ -237779,7 +250579,7 @@ static void fts5SetupPrefixIter( Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; p1->xSetOutputs(p1, pSeg); if( p1->base.nData ){ - xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); + xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist); iLastRowid = p1->base.iRowid; } } @@ -237789,6 +250589,7 @@ static void fts5SetupPrefixIter( pToken[0] = FTS5_MAIN_PREFIX + iIdx; fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); fts5IterSetOutputCb(&p->rc, p1); + for( /* no-op */ ; fts5MultiIterEof(p, p1)==0; fts5MultiIterNext2(p, p1, &bNewTerm) @@ -237804,7 +250605,6 @@ static void fts5SetupPrefixIter( } if( p1->base.nData==0 ) continue; - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ int i1 = i*nMerge; @@ -237827,7 +250627,7 @@ static void fts5SetupPrefixIter( iLastRowid = 0; } - xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); + xAppend(p, (u64)p1->base.iRowid-(u64)iLastRowid, p1, &doclist); iLastRowid = p1->base.iRowid; } @@ -237843,7 +250643,7 @@ static void fts5SetupPrefixIter( } fts5MultiIterFree(p1); - pData = fts5IdxMalloc(p, sizeof(Fts5Data)+doclist.n+FTS5_DATA_ZERO_PADDING); + pData = fts5IdxMalloc(p, sizeof(*pData)+doclist.n+FTS5_DATA_ZERO_PADDING); if( pData ){ pData->p = (u8*)&pData[1]; pData->nn = pData->szLeaf = doclist.n; @@ -237880,6 +250680,9 @@ static int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ p->iWriteRowid = iRowid; p->bDelete = bDelete; + if( bDelete==0 ){ + p->nPendingRow++; + } return fts5IndexReturn(p); } @@ -237917,6 +250720,9 @@ static int sqlite3Fts5IndexReinit(Fts5Index *p){ fts5StructureInvalidate(p); fts5IndexDiscardData(p); 
memset(&s, 0, sizeof(Fts5Structure)); + if( p->pConfig->bContentlessDelete ){ + s.nOriginCntr = 1; + } fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0); fts5StructureWrite(p, &s); return fts5IndexReturn(p); @@ -237980,7 +250786,9 @@ static int sqlite3Fts5IndexClose(Fts5Index *p){ sqlite3_finalize(p->pIdxWriter); sqlite3_finalize(p->pIdxDeleter); sqlite3_finalize(p->pIdxSelect); + sqlite3_finalize(p->pIdxNextSelect); sqlite3_finalize(p->pDataVersion); + sqlite3_finalize(p->pDeleteFromIdx); sqlite3Fts5HashFree(p->pHash); sqlite3_free(p->zDataTbl); sqlite3_free(p); @@ -238074,6 +250882,457 @@ static int sqlite3Fts5IndexWrite( return rc; } +/* +** pToken points to a buffer of size nToken bytes containing a search +** term, including the index number at the start, used on a tokendata=1 +** table. This function returns true if the term in buffer pBuf matches +** token pToken/nToken. +*/ +static int fts5IsTokendataPrefix( + Fts5Buffer *pBuf, + const u8 *pToken, + int nToken +){ + return ( + pBuf->n>=nToken + && 0==memcmp(pBuf->p, pToken, nToken) + && (pBuf->n==nToken || pBuf->p[nToken]==0x00) + ); +} + +/* +** Ensure the segment-iterator passed as the only argument points to EOF. +*/ +static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ + fts5DataRelease(pSeg->pLeaf); + pSeg->pLeaf = 0; +} + +/* +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits. Or, for an iterator used by an +** "ORDER BY rank" query, it accumulates an array of these for the entire +** query. +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +*/ +struct Fts5TokenDataIter { + int nIter; + int nIterAlloc; + + int nMap; + int nMapAlloc; + Fts5TokenDataMap *aMap; + + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[1]; +}; + +/* +** This function appends iterator pAppend to Fts5TokenDataIter pIn and +** returns the result. +*/ +static Fts5TokenDataIter *fts5AppendTokendataIter( + Fts5Index *p, /* Index object (for error code) */ + Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */ + Fts5Iter *pAppend /* Append this iterator */ +){ + Fts5TokenDataIter *pRet = pIn; + + if( p->rc==SQLITE_OK ){ + if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ + int nAlloc = pIn ? pIn->nIterAlloc*2 : 16; + int nByte = nAlloc * sizeof(Fts5Iter*) + sizeof(Fts5TokenDataIter); + Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); + + if( pNew==0 ){ + p->rc = SQLITE_NOMEM; + }else{ + if( pIn==0 ) memset(pNew, 0, nByte); + pRet = pNew; + pNew->nIterAlloc = nAlloc; + } + } + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pAppend); + }else{ + pRet->apIter[pRet->nIter++] = pAppend; + } + assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc ); + + return pRet; +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + +/* +** Append a mapping to the token-map belonging to object pT. 
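fts5IsTokendataPrefix(), added above, relies on tokendata=1 terms being stored as the token bytes, then a 0x00 separator, then the per-token payload. A standalone restatement of that test, so the layout assumption is explicit:

```c
/* A term in the index matches a tokendata=1 query if it is either the
** bare token, or the token followed by the 0x00 separator and payload. */
#include <string.h>

static int is_tokendata_match(const unsigned char *aTerm, int nTerm,
                              const unsigned char *aQuery, int nQuery){
  return nTerm>=nQuery
      && 0==memcmp(aTerm, aQuery, (size_t)nQuery)
      && (nTerm==nQuery || aTerm[nQuery]==0x00);
}
```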
+*/ +static void fts5TokendataIterAppendMap( + Fts5Index *p, + Fts5TokenDataIter *pT, + int iIter, + i64 iRowid, + i64 iPos +){ + if( p->rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nByte = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nByte); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->nMap++; + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function sets the iterator output +** variables (pIter->base.*) according to the contents of the current +** row. +*/ +static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ + int ii; + int nHit = 0; + i64 iRowid = SMALLEST_INT64; + int iMin = 0; + + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + pIter->base.nData = 0; + pIter->base.pData = 0; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 ){ + if( nHit==0 || p->base.iRowidbase.iRowid; + nHit = 1; + pIter->base.pData = p->base.pData; + pIter->base.nData = p->base.nData; + iMin = ii; + }else if( p->base.iRowid==iRowid ){ + nHit++; + } + } + } + + if( nHit==0 ){ + pIter->base.bEof = 1; + }else{ + int eDetail = pIter->pIndex->pConfig->eDetail; + pIter->base.bEof = 0; + pIter->base.iRowid = iRowid; + + if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, iRowid, -1); + }else + if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ + int nReader = 0; + int nByte = 0; + i64 iPrev = 0; + + /* Allocate array of iterators if they are not already allocated. */ + if( pT->aPoslistReader==0 ){ + pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero( + &pIter->pIndex->rc, + pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int)) + ); + if( pT->aPoslistReader==0 ) return; + pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter]; + } + + /* Populate an iterator for each poslist that will be merged */ + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( iRowid==p->base.iRowid ){ + pT->aPoslistToIter[nReader] = ii; + sqlite3Fts5PoslistReaderInit( + p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++] + ); + nByte += p->base.nData; + } + } + + /* Ensure the output buffer is large enough */ + if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){ + return; + } + + /* Ensure the token-mapping is large enough */ + if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){ + int nNew = (pT->nMapAlloc + nByte) * 2; + Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc( + pT->aMap, nNew*sizeof(Fts5TokenDataMap) + ); + if( aNew==0 ){ + pIter->pIndex->rc = SQLITE_NOMEM; + return; + } + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pIter->poslist.n = 0; + + while( 1 ){ + i64 iMinPos = LARGEST_INT64; + + /* Find smallest position */ + iMin = 0; + for(ii=0; iiaPoslistReader[ii]; + if( pReader->bEof==0 ){ + if( pReader->iPosiPos; + iMin = ii; + } + } + } + + /* If all readers were at EOF, break out of the loop. 
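The merge loop above (it concludes in the next hunk) combines the position lists of every sub-iterator sitting on the current rowid by repeatedly extracting the minimum position and advancing that reader. The same idea over int arrays, without the varint/poslist encoding; names are invented:

```c
/* One cursor per input position list. aOut must have room for the sum
** of the inputs; duplicate positions are emitted once per reader here,
** whereas the real code delta-encodes via PoslistSafeAppend. */
typedef struct Rd { const int *a; int n; int i; } Rd;

static void merge_positions(Rd *aRd, int nRd, int *aOut, int *pnOut){
  int nOut = 0;
  for(;;){
    int iMin = -1, v = 0, i;
    for(i=0; i<nRd; i++){
      if( aRd[i].i<aRd[i].n && (iMin<0 || aRd[i].a[aRd[i].i]<v) ){
        iMin = i;
        v = aRd[i].a[aRd[i].i];
      }
    }
    if( iMin<0 ) break;            /* all readers at EOF */
    aOut[nOut++] = v;
    aRd[iMin].i++;
  }
  *pnOut = nOut;
}
```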
*/ + if( iMinPos==LARGEST_INT64 ) break; + + sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos); + sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]); + + if( eDetail==FTS5_DETAIL_FULL ){ + pT->aMap[pT->nMap].iPos = iMinPos; + pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin]; + pT->aMap[pT->nMap].iRowid = iRowid; + pT->nMap++; + } + } + + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + } + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function advances the iterator. If +** argument bFrom is false, then the iterator is advanced to the next +** entry. Or, if bFrom is true, it is advanced to the first entry with +** a rowid of iFrom or greater. +*/ +static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){ + int ii; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *pIndex = pIter->pIndex; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 + && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowidbase.bEof==0 + && p->base.iRowidrc==SQLITE_OK + ){ + fts5MultiIterNext(pIndex, p, 0, 0); + } + } + } + + if( pIndex->rc==SQLITE_OK ){ + fts5IterSetOutputsTokendata(pIter); + } +} + +/* +** If the segment-iterator passed as the first argument is at EOF, then +** set pIter->term to a copy of buffer pTerm. +*/ +static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){ + if( pIter && pIter->aSeg[0].pLeaf==0 ){ + fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p); + } +} + +/* +** This function sets up an iterator to use for a non-prefix query on a +** tokendata=1 table. +*/ +static Fts5Iter *fts5SetupTokendataIter( + Fts5Index *p, /* FTS index to query */ + const u8 *pToken, /* Buffer containing query term */ + int nToken, /* Size of buffer pToken in bytes */ + Fts5Colset *pColset /* Colset to filter on */ +){ + Fts5Iter *pRet = 0; + Fts5TokenDataIter *pSet = 0; + Fts5Structure *pStruct = 0; + const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN; + + Fts5Buffer bSeek = {0, 0, 0}; + Fts5Buffer *pSmall = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); + + while( p->rc==SQLITE_OK ){ + Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0; + Fts5Iter *pNew = 0; + Fts5SegIter *pNewIter = 0; + Fts5SegIter *pPrevIter = 0; + + int iLvl, iSeg, ii; + + pNew = fts5MultiIterAlloc(p, pStruct->nSegment); + if( pSmall ){ + fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p); + fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0"); + }else{ + fts5BufferSet(&p->rc, &bSeek, nToken, pToken); + } + if( p->rc ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + pNewIter = &pNew->aSeg[0]; + pPrevIter = (pPrev ? 
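fts5TokendataIterNext() above advances only the sub-iterators parked on the composite iterator's current rowid (or, for a seek, still below the target), then recomputes the outputs from the new minimum. A stripped-down version of just the stepping rule, over array-backed cursors (the SUB_* helpers are invented):

```c
typedef struct Sub { const long long *a; int n; int i; } Sub;
#define SUB_EOF(p)   ((p)->i>=(p)->n)
#define SUB_ROWID(p) ((p)->a[(p)->i])

/* iCur is the composite iterator's current rowid. With bFrom set, every
** lagging cursor is stepped until it reaches iFrom or EOF. */
static void composite_next(Sub *aSub, int nSub, long long iCur,
                           int bFrom, long long iFrom){
  int i;
  for(i=0; i<nSub; i++){
    Sub *p = &aSub[i];
    if( !SUB_EOF(p) && (SUB_ROWID(p)==iCur || (bFrom && SUB_ROWID(p)<iFrom)) ){
      do{ p->i++; }while( bFrom && !SUB_EOF(p) && SUB_ROWID(p)<iFrom );
    }
  }
}
```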
&pPrev->aSeg[0] : 0); + for(iLvl=0; iLvlnLevel; iLvl++){ + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + int bDone = 0; + + if( pPrevIter ){ + if( fts5BufferCompare(pSmall, &pPrevIter->term) ){ + memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter)); + memset(pPrevIter, 0, sizeof(Fts5SegIter)); + bDone = 1; + }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){ + fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter); + bDone = 1; + } + } + + if( bDone==0 ){ + fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter); + } + + if( pPrevIter ){ + if( pPrevIter->pTombArray ){ + pNewIter->pTombArray = pPrevIter->pTombArray; + pNewIter->pTombArray->nRef++; + } + }else{ + fts5SegIterAllocTombstone(p, pNewIter); + } + + pNewIter++; + if( pPrevIter ) pPrevIter++; + if( p->rc ) break; + } + } + fts5TokendataSetTermIfEof(pPrev, pSmall); + + pNew->bSkipEmpty = 1; + pNew->pColset = pColset; + fts5IterSetOutputCb(&p->rc, pNew); + + /* Loop through all segments in the new iterator. Find the smallest + ** term that any segment-iterator points to. Iterator pNew will be + ** used for this term. Also, set any iterator that points to a term that + ** does not match pToken/nToken to point to EOF */ + pSmall = 0; + for(ii=0; iinSeg; ii++){ + Fts5SegIter *pII = &pNew->aSeg[ii]; + if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){ + fts5SegIterSetEOF(pII); + } + if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){ + pSmall = &pII->term; + } + } + + /* If pSmall is still NULL at this point, then the new iterator does + ** not point to any terms that match the query. So delete it and break + ** out of the loop - all required iterators have been collected. */ + if( pSmall==0 ){ + sqlite3Fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + /* Append this iterator to the set and continue. */ + pSet = fts5AppendTokendataIter(p, pSet, pNew); + } + + if( p->rc==SQLITE_OK && pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Iter *pIter = pSet->apIter[ii]; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM; + } + fts5MultiIterFinishSetup(p, pIter); + } + } + + if( p->rc==SQLITE_OK ){ + pRet = fts5MultiIterAlloc(p, 0); + } + if( pRet ){ + pRet->pTokenDataIter = pSet; + if( pSet ){ + fts5IterSetOutputsTokendata(pRet); + }else{ + pRet->base.bEof = 1; + } + }else{ + fts5TokendataIterDelete(pSet); + } + + fts5StructureRelease(pStruct); + fts5BufferFree(&bSeek); + return pRet; +} + + /* ** Open a new iterator to iterate though all rowid that match the ** specified token or token prefix. @@ -238095,8 +251354,13 @@ static int sqlite3Fts5IndexQuery( if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ int iIdx = 0; /* Index to search */ int iPrefixIdx = 0; /* +1 prefix index */ + int bTokendata = pConfig->bTokendata; if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ + bTokendata = 0; + } + /* Figure out which index to search and set iIdx accordingly. 
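Each pass of the setup loop above keeps only the segment iterators whose term still matches the query, then picks the lexicographically smallest remaining term (pSmall) to seed the next pass, stopping when nothing is left. Sketched here with (ptr,len) buffers and the usual memcmp-then-length ordering, which is how fts5BufferCompare orders terms:

```c
#include <string.h>

typedef struct Term { const unsigned char *p; int n; } Term;  /* p==0: out */

static int term_cmp(const Term *a, const Term *b){
  int nMin = a->n<b->n ? a->n : b->n;
  int res = memcmp(a->p, b->p, (size_t)nMin);
  return res ? res : (a->n - b->n);
}

/* Returns the smallest live term, or NULL once every segment is done. */
static const Term *smallest_term(const Term *aTerm, int nTerm){
  const Term *pSmall = 0;
  int i;
  for(i=0; i<nTerm; i++){
    if( aTerm[i].p && (pSmall==0 || term_cmp(pSmall, &aTerm[i])>0) ){
      pSmall = &aTerm[i];
    }
  }
  return pSmall;
}
```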
If this ** is a prefix query for which there is no prefix index, set iIdx to ** greater than pConfig->nPrefix to indicate that the query will be @@ -238122,7 +251386,10 @@ static int sqlite3Fts5IndexQuery( } } - if( iIdx<=pConfig->nPrefix ){ + if( bTokendata && iIdx==0 ){ + buf.p[0] = '0'; + pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); + }else if( iIdx<=pConfig->nPrefix ){ /* Straight index lookup */ Fts5Structure *pStruct = fts5StructureRead(p); buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx); @@ -238169,7 +251436,11 @@ static int sqlite3Fts5IndexQuery( static int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 0, 0); + }else{ + fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + } return fts5IndexReturn(pIter->pIndex); } @@ -238202,7 +251473,11 @@ static int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ static int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + if( pIter->pTokenDataIter ){ + fts5TokendataIterNext(pIter, 1, iMatch); + }else{ + fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + } return fts5IndexReturn(pIter->pIndex); } @@ -238217,6 +251492,99 @@ static const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } +/* +** This is used by xInstToken() to access the token at offset iOff, column +** iCol of row iRowid. The token is returned via output variables *ppOut +** and *pnOut. The iterator passed as the first argument must be a tokendata=1 +** iterator (pIter->pTokenDataIter!=0). +*/ +static int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5TokenDataMap *aMap = pT->aMap; + i64 iPos = (((i64)iCol)<<32) + iOff; + + int i1 = 0; + int i2 = pT->nMap; + int iTest = 0; + + while( i2>i1 ){ + iTest = (i1 + i2) / 2; + + if( aMap[iTest].iRowidiRowid ){ + i2 = iTest; + }else{ + if( aMap[iTest].iPosiPos ){ + i2 = iTest; + }else{ + break; + } + } + } + + if( i2>i1 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + } + + return SQLITE_OK; +} + +/* +** Clear any existing entries from the token-map associated with the +** iterator passed as the only argument. +*/ +static void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + if( pIter && pIter->pTokenDataIter ){ + pIter->pTokenDataIter->nMap = 0; + } +} + +/* +** Set a token-mapping for the iterator passed as the first argument. This +** is used in detail=column or detail=none mode when a token is requested +** using the xInstToken() API. In this case the caller tokenizers the +** current row and configures the token-mapping via multiple calls to this +** function. 
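sqlite3Fts5IterToken() above binary-searches the token map, which is kept sorted by (rowid, position), to find which sub-iterator (and therefore which stored term) produced a given hit. The search logic in isolation, with invented names:

```c
typedef struct Map { long long iRowid; long long iPos; int iIter; } Map;

/* Return the iterator index mapped to (iRowid, iPos), or -1 if the
** position is not in the map. */
static int map_lookup(const Map *aMap, int nMap,
                      long long iRowid, long long iPos){
  int i1 = 0, i2 = nMap;
  while( i2>i1 ){
    int iTest = (i1+i2)/2;
    if( aMap[iTest].iRowid<iRowid
     || (aMap[iTest].iRowid==iRowid && aMap[iTest].iPos<iPos)
    ){
      i1 = iTest+1;
    }else if( aMap[iTest].iRowid>iRowid || aMap[iTest].iPos>iPos ){
      i2 = iTest;
    }else{
      return aMap[iTest].iIter;    /* exact hit */
    }
  }
  return -1;
}
```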
+*/ +static int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, int iCol, int iOff +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *p = pIter->pIndex; + int ii; + + assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); + assert( pIter->pTokenDataIter ); + + for(ii=0; iinIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( iinIter ){ + fts5TokendataIterAppendMap(p, pT, ii, iRowid, (((i64)iCol)<<32) + iOff); + } + return fts5IndexReturn(p); +} + /* ** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery(). */ @@ -238224,6 +251592,7 @@ static void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ if( pIndexIter ){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); fts5MultiIterFree(pIter); sqlite3Fts5IndexCloseReader(pIndex); } @@ -238307,6 +251676,347 @@ static int sqlite3Fts5IndexLoadConfig(Fts5Index *p){ return fts5IndexReturn(p); } +/* +** Retrieve the origin value that will be used for the segment currently +** being accumulated in the in-memory hash table when it is flushed to +** disk. If successful, SQLITE_OK is returned and (*piOrigin) set to +** the queried value. Or, if an error occurs, an error code is returned +** and the final value of (*piOrigin) is undefined. +*/ +static int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + if( pStruct ){ + *piOrigin = pStruct->nOriginCntr; + fts5StructureRelease(pStruct); + } + return fts5IndexReturn(p); +} + +/* +** Buffer pPg contains a page of a tombstone hash table - one of nPg pages +** associated with the same segment. This function adds rowid iRowid to +** the hash table. The caller is required to guarantee that there is at +** least one free slot on the page. +** +** If parameter bForce is false and the hash table is deemed to be full +** (more than half of the slots are occupied), then non-zero is returned +** and iRowid not inserted. Or, if bForce is true or if the hash table page +** is not full, iRowid is inserted and zero returned. +*/ +static int fts5IndexTombstoneAddToPage( + Fts5Data *pPg, + int bForce, + int nPg, + u64 iRowid +){ + const int szKey = TOMBSTONE_KEYSIZE(pPg); + const int nSlot = TOMBSTONE_NSLOT(pPg); + const int nElem = fts5GetU32(&pPg->p[4]); + int iSlot = (iRowid / nPg) % nSlot; + int nCollide = nSlot; + + if( szKey==4 && iRowid>0xFFFFFFFF ) return 2; + if( iRowid==0 ){ + pPg->p[1] = 0x01; + return 0; + } + + if( bForce==0 && nElem>=(nSlot/2) ){ + return 1; + } + + fts5PutU32(&pPg->p[4], nElem+1); + if( szKey==4 ){ + u32 *aSlot = (u32*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU32((u8*)&aSlot[iSlot], (u32)iRowid); + }else{ + u64 *aSlot = (u64*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU64((u8*)&aSlot[iSlot], iRowid); + } + + return 0; +} + +/* +** This function attempts to build a new hash containing all the keys +** currently in the tombstone hash table for segment pSeg. The new +** hash will be stored in the nOut buffers passed in array apOut[]. +** All pages of the new hash use key-size szKey (4 or 8). +** +** Return 0 if the hash is successfully rebuilt into the nOut pages. 
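fts5IndexTombstoneAddToPage() above treats each tombstone page as an open-addressed hash with linear probing: the start slot is derived from the rowid and the page count, rowid 0 gets a dedicated flag byte, and insertion is refused once the page is half full so the caller rebuilds. A toy single-page model of the same rules (fixed 8-byte keys, no page growth; names invented):

```c
#include <stdint.h>

#define NSLOT 32

typedef struct TombPage {
  uint8_t bZero;              /* set if rowid 0 is a member */
  uint32_t nElem;             /* occupied slots */
  uint64_t aSlot[NSLOT];      /* 0 means empty */
} TombPage;

/* Returns 0 on success, 1 if the page is too full (caller must grow
** the hash, as fts5IndexTombstoneAdd() does via a rebuild). */
static int tomb_add(TombPage *pPg, int nPg, uint64_t iRowid){
  uint32_t iSlot = (uint32_t)((iRowid / (uint64_t)nPg) % NSLOT);
  if( iRowid==0 ){ pPg->bZero = 1; return 0; }
  if( pPg->nElem>=NSLOT/2 ) return 1;     /* keep load factor below 0.5 */
  while( pPg->aSlot[iSlot] ) iSlot = (iSlot+1) % NSLOT;
  pPg->aSlot[iSlot] = iRowid;
  pPg->nElem++;
  return 0;
}
```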
+** Or non-zero if it is not (because one page became overfull). In this +** case the caller should retry with a larger nOut parameter. +** +** Parameter pData1 is page iPg1 of the hash table being rebuilt. +*/ +static int fts5IndexTombstoneRehash( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int nOut, /* Number of output pages */ + Fts5Data **apOut /* Array of output hash pages */ +){ + int ii; + int res = 0; + + /* Initialize the headers of all the output pages */ + for(ii=0; iip[0] = szKey; + fts5PutU32(&apOut[ii]->p[4], 0); + } + + /* Loop through the current pages of the hash table. */ + for(ii=0; res==0 && iinPgTombstone; ii++){ + Fts5Data *pData = 0; /* Page ii of the current hash table */ + Fts5Data *pFree = 0; /* Free this at the end of the loop */ + + if( iPg1==ii ){ + pData = pData1; + }else{ + pFree = pData = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii)); + } + + if( pData ){ + int szKeyIn = TOMBSTONE_KEYSIZE(pData); + int nSlotIn = (pData->nn - 8) / szKeyIn; + int iIn; + for(iIn=0; iInp[8]; + if( aSlot[iIn] ) iVal = fts5GetU32((u8*)&aSlot[iIn]); + }else{ + u64 *aSlot = (u64*)&pData->p[8]; + if( aSlot[iIn] ) iVal = fts5GetU64((u8*)&aSlot[iIn]); + } + + /* If iVal is not 0 at this point, insert it into the new hash table */ + if( iVal ){ + Fts5Data *pPg = apOut[(iVal % nOut)]; + res = fts5IndexTombstoneAddToPage(pPg, 0, nOut, iVal); + if( res ) break; + } + } + + /* If this is page 0 of the old hash, copy the rowid-0-flag from the + ** old hash to the new. */ + if( ii==0 ){ + apOut[0]->p[1] = pData->p[1]; + } + } + fts5DataRelease(pFree); + } + + return res; +} + +/* +** This is called to rebuild the hash table belonging to segment pSeg. +** If parameter pData1 is not NULL, then one page of the existing hash table +** has already been loaded - pData1, which is page iPg1. The key-size for +** the new hash table is szKey (4 or 8). +** +** If successful, the new hash table is not written to disk. Instead, +** output parameter (*pnOut) is set to the number of pages in the new +** hash table, and (*papOut) to point to an array of buffers containing +** the new page data. +** +** If an error occurs, an error code is left in the Fts5Index object and +** both output parameters set to 0 before returning. +*/ +static void fts5IndexTombstoneRebuild( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int *pnOut, /* OUT: Number of output pages */ + Fts5Data ***papOut /* OUT: Output hash pages */ +){ + const int MINSLOT = 32; + int nSlotPerPage = MAX(MINSLOT, (p->pConfig->pgsz - 8) / szKey); + int nSlot = 0; /* Number of slots in each output page */ + int nOut = 0; + + /* Figure out how many output pages (nOut) and how many slots per + ** page (nSlot). There are three possibilities: + ** + ** 1. The hash table does not yet exist. In this case the new hash + ** table will consist of a single page with MINSLOT slots. + ** + ** 2. The hash table exists but is currently a single page. In this + ** case an attempt is made to grow the page to accommodate the new + ** entry. The page is allowed to grow up to nSlotPerPage (see above) + ** slots. + ** + ** 3. 
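The three sizing cases spelled out in the comment above (and finished in the next hunk) reduce to a small decision procedure. A sketch of just that policy, mirroring the MINSLOT constant and the grow-then-split behavior; the function name is invented:

```c
/* nPg/nElem describe the current hash (nElem only matters when nPg==1);
** nSlotPerPage is the page-size-derived cap. Outputs: page count and
** slots per page for the rebuilt hash. */
static void tomb_rebuild_size(int nPg, int nElem, int nSlotPerPage,
                              int *pnOut, int *pnSlot){
  const int MINSLOT = 32;
  int nOut = 0, nSlot = 0;
  if( nPg==0 ){                      /* case 1: no hash yet */
    nOut = 1; nSlot = MINSLOT;
  }else if( nPg==1 ){                /* case 2: try to grow the one page */
    nOut = 1;
    nSlot = nElem*4>MINSLOT ? nElem*4 : MINSLOT;
    if( nSlot>nSlotPerPage ) nOut = 0;    /* too big, fall through */
  }
  if( nOut==0 ){                     /* case 3: go (or stay) multi-page */
    nOut = nPg*2 + 1;
    nSlot = nSlotPerPage;
  }
  *pnOut = nOut;
  *pnSlot = nSlot;
}
```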
The hash table already consists of more than one page, or of + ** a single page already so large that it cannot be grown. In this + ** case the new hash consists of (nPg*2+1) pages of nSlotPerPage + ** slots each, where nPg is the current number of pages in the + ** hash table. + */ + if( pSeg->nPgTombstone==0 ){ + /* Case 1. */ + nOut = 1; + nSlot = MINSLOT; + }else if( pSeg->nPgTombstone==1 ){ + /* Case 2. */ + int nElem = (int)fts5GetU32(&pData1->p[4]); + assert( pData1 && iPg1==0 ); + nOut = 1; + nSlot = MAX(nElem*4, MINSLOT); + if( nSlot>nSlotPerPage ) nOut = 0; + } + if( nOut==0 ){ + /* Case 3. */ + nOut = (pSeg->nPgTombstone * 2 + 1); + nSlot = nSlotPerPage; + } + + /* Allocate the required array and output pages */ + while( 1 ){ + int res = 0; + int ii = 0; + int szPage = 0; + Fts5Data **apOut = 0; + + /* Allocate space for the new hash table */ + assert( nSlot>=MINSLOT ); + apOut = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data*) * nOut); + szPage = 8 + nSlot*szKey; + for(ii=0; iirc, + sizeof(Fts5Data)+szPage + ); + if( pNew ){ + pNew->nn = szPage; + pNew->p = (u8*)&pNew[1]; + apOut[ii] = pNew; + } + } + + /* Rebuild the hash table. */ + if( p->rc==SQLITE_OK ){ + res = fts5IndexTombstoneRehash(p, pSeg, pData1, iPg1, szKey, nOut, apOut); + } + if( res==0 ){ + if( p->rc ){ + fts5IndexFreeArray(apOut, nOut); + apOut = 0; + nOut = 0; + } + *pnOut = nOut; + *papOut = apOut; + break; + } + + /* If control flows to here, it was not possible to rebuild the hash + ** table. Free all buffers and then try again with more pages. */ + assert( p->rc==SQLITE_OK ); + fts5IndexFreeArray(apOut, nOut); + nSlot = nSlotPerPage; + nOut = nOut*2 + 1; + } +} + + +/* +** Add a tombstone for rowid iRowid to segment pSeg. +*/ +static void fts5IndexTombstoneAdd( + Fts5Index *p, + Fts5StructureSegment *pSeg, + u64 iRowid +){ + Fts5Data *pPg = 0; + int iPg = -1; + int szKey = 0; + int nHash = 0; + Fts5Data **apHash = 0; + + p->nContentlessDelete++; + + if( pSeg->nPgTombstone>0 ){ + iPg = iRowid % pSeg->nPgTombstone; + pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg)); + if( pPg==0 ){ + assert( p->rc!=SQLITE_OK ); + return; + } + + if( 0==fts5IndexTombstoneAddToPage(pPg, 0, pSeg->nPgTombstone, iRowid) ){ + fts5DataWrite(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg), pPg->p, pPg->nn); + fts5DataRelease(pPg); + return; + } + } + + /* Have to rebuild the hash table. First figure out the key-size (4 or 8). */ + szKey = pPg ? TOMBSTONE_KEYSIZE(pPg) : 4; + if( iRowid>0xFFFFFFFF ) szKey = 8; + + /* Rebuild the hash table */ + fts5IndexTombstoneRebuild(p, pSeg, pPg, iPg, szKey, &nHash, &apHash); + assert( p->rc==SQLITE_OK || (nHash==0 && apHash==0) ); + + /* If all has succeeded, write the new rowid into one of the new hash + ** table pages, then write them all out to disk. */ + if( nHash ){ + int ii = 0; + fts5IndexTombstoneAddToPage(apHash[iRowid % nHash], 1, nHash, iRowid); + for(ii=0; iiiSegid, ii); + fts5DataWrite(p, iTombstoneRowid, apHash[ii]->p, apHash[ii]->nn); + } + pSeg->nPgTombstone = nHash; + fts5StructureWrite(p, p->pStruct); + } + + fts5DataRelease(pPg); + fts5IndexFreeArray(apHash, nHash); +} + +/* +** Add iRowid to the tombstone list of the segment or segments that contain +** rows from origin iOrigin. Return SQLITE_OK if successful, or an SQLite +** error code otherwise. 
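The allocation loop above retries the whole rebuild whenever the rehash reports an overfull page, growing the page count geometrically each time, so termination is guaranteed. The control flow in isolation (the callback stands in for fts5IndexTombstoneRehash; the real loop also reallocates the output pages on every attempt):

```c
typedef int (*RehashFn)(void *pCtx, int nOut);   /* 0 == success */

/* Returns the page count that finally accommodated every entry. */
static int rebuild_with_retry(void *pCtx, RehashFn xRehash, int nOut0){
  int nOut = nOut0;
  while( xRehash(pCtx, nOut) ){   /* non-zero: some page became overfull */
    nOut = nOut*2 + 1;            /* grow geometrically and try again */
  }
  return nOut;
}
```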
+*/ +static int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + if( pStruct ){ + int bFound = 0; /* True after pSeg->nEntryTombstone incr. */ + int iLvl; + for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){ + int iSeg; + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + if( pSeg->iOrigin1<=(u64)iOrigin && pSeg->iOrigin2>=(u64)iOrigin ){ + if( bFound==0 ){ + pSeg->nEntryTombstone++; + bFound = 1; + } + fts5IndexTombstoneAdd(p, pSeg, iRowid); + } + } + } + fts5StructureRelease(pStruct); + } + return fts5IndexReturn(p); +} /************************************************************************* ************************************************************************** @@ -238390,7 +252100,9 @@ static int fts5QueryCksum( int eDetail = p->pConfig->eDetail; u64 cksum = *pCksum; Fts5IndexIter *pIter = 0; - int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter); + int rc = sqlite3Fts5IndexQuery( + p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter + ); while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){ i64 rowid = pIter->iRowid; @@ -238557,7 +252269,7 @@ static void fts5IndexIntegrityCheckEmpty( } static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ - int iTermOff = 0; + i64 iTermOff = 0; int ii; Fts5Buffer buf1 = {0,0,0}; @@ -238566,7 +252278,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ ii = pLeaf->szLeaf; while( iinn && p->rc==SQLITE_OK ){ int res; - int iOff; + i64 iOff; int nIncr; ii += fts5GetVarint32(&pLeaf->p[ii], nIncr); @@ -238611,6 +252323,7 @@ static void fts5IndexIntegrityCheckSegment( Fts5StructureSegment *pSeg /* Segment to check internal consistency */ ){ Fts5Config *pConfig = p->pConfig; + int bSecureDelete = (pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE); sqlite3_stmt *pStmt = 0; int rc2; int iIdxPrevLeaf = pSeg->pgnoFirst-1; @@ -238646,7 +252359,19 @@ static void fts5IndexIntegrityCheckSegment( ** is also a rowid pointer within the leaf page header, it points to a ** location before the term. */ if( pLeaf->nn<=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + + if( nIdxTerm==0 + && pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE + && pLeaf->nn==pLeaf->szLeaf + && pLeaf->nn==4 + ){ + /* special case - the very first page in a segment keeps its %_idx + ** entry even if all the terms are removed from it by secure-delete + ** operations. */ + }else{ + p->rc = FTS5_CORRUPT; + } + }else{ int iOff; /* Offset of first term on leaf */ int iRowidOff; /* Offset of first rowid on leaf */ @@ -238710,9 +252435,12 @@ static void fts5IndexIntegrityCheckSegment( ASSERT_SZLEAF_OK(pLeaf); if( iRowidOff>=pLeaf->szLeaf ){ p->rc = FTS5_CORRUPT; - }else{ + }else if( bSecureDelete==0 || iRowidOff>0 ){ + i64 iDlRowid = fts5DlidxIterRowid(pDlidx); fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid); - if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT; + if( iRowidrc = FTS5_CORRUPT; + } } fts5DataRelease(pLeaf); } @@ -238806,6 +252534,7 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum /* If this is a new term, query for it. Update cksum3 with the results. */ fts5TestTerm(p, &term, z, n, cksum2, &cksum3); + if( p->rc ) break; if( eDetail==FTS5_DETAIL_NONE ){ if( 0==fts5MultiIterIsEmpty(p, pIter) ){ @@ -238841,13 +252570,14 @@ static int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum ** function only. 
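sqlite3Fts5IndexContentlessDelete() above selects target segments by origin range: each segment records the span of origin-counter values [iOrigin1, iOrigin2] it was built from, so a deleted row can only appear in segments whose span covers that row's origin. The membership test on its own:

```c
typedef struct Seg {
  unsigned long long iOrigin1;   /* smallest origin in this segment */
  unsigned long long iOrigin2;   /* largest origin in this segment */
} Seg;

static int seg_may_contain(const Seg *pSeg, long long iOrigin){
  return pSeg->iOrigin1<=(unsigned long long)iOrigin
      && pSeg->iOrigin2>=(unsigned long long)iOrigin;
}
```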
*/ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Decode a segment-data rowid from the %_data table. This function is ** the opposite of macro FTS5_SEGMENT_ROWID(). */ static void fts5DecodeRowid( i64 iRowid, /* Rowid from %_data table */ + int *pbTombstone, /* OUT: Tombstone hash flag */ int *piSegid, /* OUT: Segment id */ int *pbDlidx, /* OUT: Dlidx flag */ int *piHeight, /* OUT: Height */ @@ -238863,13 +252593,16 @@ static void fts5DecodeRowid( iRowid >>= FTS5_DATA_DLI_B; *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1)); + iRowid >>= FTS5_DATA_ID_B; + + *pbTombstone = (int)(iRowid & 0x0001); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ - int iSegid, iHeight, iPgno, bDlidx; /* Rowid compenents */ - fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno); + int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid compenents */ + fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); if( iSegid==0 ){ if( iKey==FTS5_AVERAGES_ROWID ){ @@ -238879,14 +252612,16 @@ static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ } } else{ - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}", - bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%s%ssegid=%d h=%d pgno=%d}", + bDlidx ? "dlidx " : "", + bTomb ? "tombstone " : "", + iSegid, iHeight, iPgno ); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugStructure( int *pRc, /* IN/OUT: error code */ Fts5Buffer *pBuf, @@ -238901,16 +252636,22 @@ static void fts5DebugStructure( ); for(iSeg=0; iSegnSeg; iSeg++){ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}", + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d", pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast ); + if( pSeg->iOrigin1>0 ){ + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " origin=%lld..%lld", + pSeg->iOrigin1, pSeg->iOrigin2 + ); + } + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. ** @@ -238935,9 +252676,9 @@ static void fts5DecodeStructure( fts5DebugStructure(pRc, pBuf, p); fts5StructureRelease(p); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. ** @@ -238960,9 +252701,9 @@ static void fts5DecodeAverages( zSpace = " "; } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Buffer (a/n) is assumed to contain a list of serialized varints. Read ** each varint and append its string representation to buffer pBuf. 
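fts5DecodeRowid() above now peels one extra bit, the tombstone flag, off the packed %_data rowid. A sketch of the full layout it implies, using the bit widths of the FTS5_DATA_*_B constants defined earlier in this file (page=31, height=5, dlidx=1, segid=16); treat those widths as assumptions of this sketch:

```c
#include <stdint.h>

#define PAGE_B   31
#define HEIGHT_B  5
#define DLI_B     1
#define ID_B     16

/* Low bits first: pgno, height, dlidx flag, segid, then the new
** tombstone flag. iRowid is non-negative for these keys. */
static void decode_rowid(int64_t iRowid, int *pbTomb, int *piSegid,
                         int *pbDlidx, int *piHeight, int *piPgno){
  *piPgno   = (int)(iRowid & ((1LL<<PAGE_B)-1));    iRowid >>= PAGE_B;
  *piHeight = (int)(iRowid & ((1LL<<HEIGHT_B)-1));  iRowid >>= HEIGHT_B;
  *pbDlidx  = (int)(iRowid & ((1LL<<DLI_B)-1));     iRowid >>= DLI_B;
  *piSegid  = (int)(iRowid & ((1LL<<ID_B)-1));      iRowid >>= ID_B;
  *pbTomb   = (int)(iRowid & 0x0001);
}
```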
Return @@ -238979,9 +252720,9 @@ static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ } return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The start of buffer (a/n) contains the start of a doclist. The doclist ** may or may not finish within the buffer. This function appends a text @@ -239014,9 +252755,9 @@ static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This function is part of the fts5_decode() debugging function. It is ** only ever used with detail=none tables. @@ -239057,9 +252798,27 @@ static void fts5DecodeRowidList( sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) +static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){ + int ii; + fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1); + if( *pRc==SQLITE_OK ){ + for(ii=0; iin; ii++){ + if( pTerm->p[ii]==0x00 ){ + pBuf->p[pBuf->n++] = '\\'; + pBuf->p[pBuf->n++] = '0'; + }else{ + pBuf->p[pBuf->n++] = pTerm->p[ii]; + } + } + pBuf->p[pBuf->n] = 0x00; + } +} +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ + +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_decode(). */ @@ -239070,6 +252829,7 @@ static void fts5DecodeFunction( ){ i64 iRowid; /* Rowid for record being decoded */ int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */ + int bTomb; const u8 *aBlob; int n; /* Record to decode */ u8 *a = 0; Fts5Buffer s; /* Build up text to return here */ @@ -239092,7 +252852,7 @@ static void fts5DecodeFunction( if( a==0 ) goto decode_out; if( n>0 ) memcpy(a, aBlob, n); - fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno); + fts5DecodeRowid(iRowid, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); fts5DebugRowid(&rc, &s, iRowid); if( bDlidx ){ @@ -239111,6 +252871,28 @@ static void fts5DecodeFunction( " %d(%lld)", lvl.iLeafPgno, lvl.iRowid ); } + }else if( bTomb ){ + u32 nElem = fts5GetU32(&a[4]); + int szKey = (aBlob[0]==4 || aBlob[0]==8) ? aBlob[0] : 8; + int nSlot = (n - 8) / szKey; + int ii; + sqlite3Fts5BufferAppendPrintf(&rc, &s, " nElem=%d", (int)nElem); + if( aBlob[1] ){ + sqlite3Fts5BufferAppendPrintf(&rc, &s, " 0"); + } + for(ii=0; iiestimatedCost = (double)100; + pIdxInfo->estimatedRows = 100; + pIdxInfo->idxNum = 0; + for(i=0, p=pIdxInfo->aConstraint; inConstraint; i++, p++){ + if( p->usable==0 ) continue; + if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==11 ){ + rc = SQLITE_OK; + pIdxInfo->aConstraintUsage[i].omit = 1; + pIdxInfo->aConstraintUsage[i].argvIndex = 1; + break; + } + } + return rc; +} + +/* +** This method is the destructor for bytecodevtab objects. +*/ +static int fts5structDisconnectMethod(sqlite3_vtab *pVtab){ + Fts5StructVtab *p = (Fts5StructVtab*)pVtab; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Constructor for a new bytecodevtab_cursor object. 
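fts5BufferAppendTerm(), added above, exists because tokendata=1 terms contain embedded 0x00 separator bytes that would truncate ordinary string output; it renders each such byte as the two characters '\' '0'. The same escaping rule as a tiny standalone printer:

```c
#include <stdio.h>

static void print_term(const unsigned char *p, int n){
  int i;
  for(i=0; i<n; i++){
    if( p[i]==0x00 ){
      fputs("\\0", stdout);     /* escape the tokendata separator */
    }else{
      fputc(p[i], stdout);
    }
  }
}
```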
+*/ +static int fts5structOpenMethod(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCsr){ + int rc = SQLITE_OK; + Fts5StructVcsr *pNew = 0; + + pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + *ppCsr = (sqlite3_vtab_cursor*)pNew; + + return SQLITE_OK; +} + +/* +** Destructor for a bytecodevtab_cursor. +*/ +static int fts5structCloseMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + fts5StructureRelease(pCsr->pStruct); + sqlite3_free(pCsr); + return SQLITE_OK; +} + + +/* +** Advance a bytecodevtab_cursor to its next row of output. +*/ +static int fts5structNextMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + + assert( pCsr->pStruct ); + pCsr->iSeg++; + pCsr->iRowid++; + while( pCsr->iLevelnLevel && pCsr->iSeg>=p->aLevel[pCsr->iLevel].nSeg ){ + pCsr->iLevel++; + pCsr->iSeg = 0; + } + if( pCsr->iLevel>=p->nLevel ){ + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + } + return SQLITE_OK; +} + +/* +** Return TRUE if the cursor has been moved off of the last +** row of output. +*/ +static int fts5structEofMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + return pCsr->pStruct==0; +} + +static int fts5structRowidMethod( + sqlite3_vtab_cursor *cur, + sqlite_int64 *piRowid +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + *piRowid = pCsr->iRowid; + return SQLITE_OK; +} + +/* +** Return values of columns for the row at which the bytecodevtab_cursor +** is currently pointing. +*/ +static int fts5structColumnMethod( + sqlite3_vtab_cursor *cur, /* The cursor */ + sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ + int i /* Which column to return */ +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + Fts5StructureSegment *pSeg = &p->aLevel[pCsr->iLevel].aSeg[pCsr->iSeg]; + + switch( i ){ + case 0: /* level */ + sqlite3_result_int(ctx, pCsr->iLevel); + break; + case 1: /* segment */ + sqlite3_result_int(ctx, pCsr->iSeg); + break; + case 2: /* merge */ + sqlite3_result_int(ctx, pCsr->iSeg < p->aLevel[pCsr->iLevel].nMerge); + break; + case 3: /* segid */ + sqlite3_result_int(ctx, pSeg->iSegid); + break; + case 4: /* leaf1 */ + sqlite3_result_int(ctx, pSeg->pgnoFirst); + break; + case 5: /* leaf2 */ + sqlite3_result_int(ctx, pSeg->pgnoLast); + break; + case 6: /* origin1 */ + sqlite3_result_int64(ctx, pSeg->iOrigin1); + break; + case 7: /* origin2 */ + sqlite3_result_int64(ctx, pSeg->iOrigin2); + break; + case 8: /* npgtombstone */ + sqlite3_result_int(ctx, pSeg->nPgTombstone); + break; + case 9: /* nentrytombstone */ + sqlite3_result_int64(ctx, pSeg->nEntryTombstone); + break; + case 10: /* nentry */ + sqlite3_result_int64(ctx, pSeg->nEntry); + break; + } + return SQLITE_OK; +} + +/* +** Initialize a cursor. +** +** idxNum==0 means show all subprograms +** idxNum==1 means show only the main bytecode and omit subprograms. 
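fts5structNextMethod() above walks the decoded structure in row-major order: all segments of level 0, then level 1, and so on, releasing the structure once past the last level. The advance rule on its own, with an invented cursor type:

```c
typedef struct Cur { int iLevel; int iSeg; } Cur;

/* aSegPerLevel[i] is the segment count of level i. Returns true once
** the cursor has moved past the final segment (EOF). */
static int cur_next(Cur *pCur, const int *aSegPerLevel, int nLevel){
  pCur->iSeg++;
  while( pCur->iLevel<nLevel && pCur->iSeg>=aSegPerLevel[pCur->iLevel] ){
    pCur->iLevel++;
    pCur->iSeg = 0;
  }
  return pCur->iLevel>=nLevel;
}
```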
+*/ +static int fts5structFilterMethod( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr *)pVtabCursor; + int rc = SQLITE_OK; + + const u8 *aBlob = 0; + int nBlob = 0; + + assert( argc==1 ); + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + + nBlob = sqlite3_value_bytes(argv[0]); + aBlob = (const u8*)sqlite3_value_blob(argv[0]); + rc = fts5StructureDecode(aBlob, nBlob, 0, &pCsr->pStruct); + if( rc==SQLITE_OK ){ + pCsr->iLevel = 0; + pCsr->iRowid = 0; + pCsr->iSeg = -1; + rc = fts5structNextMethod(pVtabCursor); + } + + return rc; +} + +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called as part of registering the FTS5 module with database @@ -239315,7 +253326,7 @@ static void fts5RowidFunction( ** SQLite error code is returned instead. */ static int sqlite3Fts5IndexInit(sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) int rc = sqlite3_create_function( db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0 ); @@ -239332,6 +253343,37 @@ static int sqlite3Fts5IndexInit(sqlite3 *db){ db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0 ); } + + if( rc==SQLITE_OK ){ + static const sqlite3_module fts5structure_module = { + 0, /* iVersion */ + 0, /* xCreate */ + fts5structConnectMethod, /* xConnect */ + fts5structBestIndexMethod, /* xBestIndex */ + fts5structDisconnectMethod, /* xDisconnect */ + 0, /* xDestroy */ + fts5structOpenMethod, /* xOpen */ + fts5structCloseMethod, /* xClose */ + fts5structFilterMethod, /* xFilter */ + fts5structNextMethod, /* xNext */ + fts5structEofMethod, /* xEof */ + fts5structColumnMethod, /* xColumn */ + fts5structRowidMethod, /* xRowid */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindFunction */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0, /* xShadowName */ + 0 /* xIntegrity */ + }; + rc = sqlite3_create_module(db, "fts5_structure", &fts5structure_module, 0); + } return rc; #else return SQLITE_OK; @@ -239467,6 +253509,8 @@ struct Fts5FullTable { Fts5Storage *pStorage; /* Document store */ Fts5Global *pGlobal; /* Global (connection wide) data */ Fts5Cursor *pSortCsr; /* Sort data from this cursor */ + int iSavepoint; /* Successful xSavepoint()+1 */ + #ifdef SQLITE_DEBUG struct Fts5TransactionState ts; #endif @@ -239610,7 +253654,7 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ break; case FTS5_SYNC: - assert( p->ts.eState==1 ); + assert( p->ts.eState==1 || p->ts.eState==2 ); p->ts.eState = 2; break; @@ -239625,21 +253669,21 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ break; case FTS5_SAVEPOINT: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint>=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint; break; case FTS5_RELEASE: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint<=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint-1; break; case FTS5_ROLLBACKTO: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=-1 ); /* The following assert() can fail if another vtab strikes an error ** within an xSavepoint() call then SQLite calls xRollbackTo() - without @@ -239755,6 +253799,13 @@ static int fts5InitVtab( pConfig->pzErrmsg = 0; } + if( rc==SQLITE_OK && 
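The fts5structure_module registration above leaves xCreate NULL while supplying xConnect. In SQLite that makes the module eponymous-only: it can be queried as a table-valued function, e.g. fts5_structure(<structure blob>), but cannot back a CREATE VIRTUAL TABLE statement. A minimal registration sketch under that reading (function name invented):

```c
#include "sqlite3.h"

/* pMod->xCreate==0 and pMod->xConnect!=0: eponymous-only module. */
static int register_fts5_structure(sqlite3 *db, const sqlite3_module *pMod){
  return sqlite3_create_module(db, "fts5_structure", pMod, 0);
}
```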
pConfig->eContent==FTS5_CONTENT_NORMAL ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, (int)1); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); + } + if( rc!=SQLITE_OK ){ fts5FreeVtab(pTab); pTab = 0; @@ -239997,12 +254048,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ } idxStr[iIdxStr] = '\0'; - /* Set idxFlags flags for the ORDER BY clause */ + /* Set idxFlags flags for the ORDER BY clause + ** + ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC". + */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; if( iSort==(pConfig->nCol+1) && bSeenMatch ){ idxFlags |= FTS5_BI_ORDER_RANK; - }else if( iSort==-1 ){ + }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; } if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){ @@ -240254,6 +254308,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ ); assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) ); + /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table, + ** clear any token mappings accumulated at the fts5_index.c level. In + ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH, + ** we need to retain the mappings for the entire query. */ + if( pCsr->ePlan==FTS5_PLAN_MATCH + && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata + ){ + sqlite3Fts5ExprClearTokens(pCsr->pExpr); + } + if( pCsr->ePlan<3 ){ int bSkip = 0; if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc; @@ -240679,6 +254743,9 @@ static int fts5FilterMethod( pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64); } + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) goto filter_out; + if( pTab->pSortCsr ){ /* If pSortCsr is non-NULL, then this call is being made as part of ** processing for a "... 
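The fts5InitVtab() hunk above adds two sqlite3_vtab_config() calls, both documented SQLite APIs that are only valid inside xCreate/xConnect: SQLITE_VTAB_CONSTRAINT_SUPPORT makes SQLite deliver the statement's conflict mode to xUpdate (which the REPLACE handling in fts5UpdateMethod relies on), and SQLITE_VTAB_INNOCUOUS marks the vtab as safe for use from triggers and views under defensive settings. A sketch of the pattern in isolation:

```c
#include "sqlite3.h"

/* Call from within an xCreate/xConnect implementation only. */
static int example_connect_config(sqlite3 *db, int bContentNormal){
  int rc = SQLITE_OK;
  if( bContentNormal ){
    rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS);
  }
  return rc;
}
```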
MATCH ORDER BY rank" query (ePlan is @@ -240701,6 +254768,7 @@ static int fts5FilterMethod( pCsr->pExpr = pTab->pSortCsr->pExpr; rc = fts5CursorFirst(pTab, pCsr, bDesc); }else if( pCsr->pExpr ){ + assert( rc==SQLITE_OK ); rc = fts5CursorParseRank(pConfig, pCsr, pRank); if( rc==SQLITE_OK ){ if( bOrderByRank ){ @@ -240872,6 +254940,7 @@ static int fts5SpecialInsert( Fts5Config *pConfig = pTab->p.pConfig; int rc = SQLITE_OK; int bError = 0; + int bLoadConfig = 0; if( 0==sqlite3_stricmp("delete-all", zCmd) ){ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ @@ -240883,6 +254952,7 @@ static int fts5SpecialInsert( }else{ rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){ if( pConfig->eContent==FTS5_CONTENT_NONE ){ fts5SetVtabError(pTab, @@ -240892,6 +254962,7 @@ static int fts5SpecialInsert( }else{ rc = sqlite3Fts5StorageRebuild(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("optimize", zCmd) ){ rc = sqlite3Fts5StorageOptimize(pTab->pStorage); }else if( 0==sqlite3_stricmp("merge", zCmd) ){ @@ -240904,8 +254975,13 @@ static int fts5SpecialInsert( }else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){ pConfig->bPrefixIndex = sqlite3_value_int(pVal); #endif + }else if( 0==sqlite3_stricmp("flush", zCmd) ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); }else{ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } if( rc==SQLITE_OK ){ rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError); } @@ -240917,6 +254993,12 @@ static int fts5SpecialInsert( } } } + + if( rc==SQLITE_OK && bLoadConfig ){ + pTab->p.pConfig->iCookie--; + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } + return rc; } @@ -240973,9 +255055,10 @@ static int fts5UpdateMethod( Fts5Config *pConfig = pTab->p.pConfig; int eType0; /* value_type() of apVal[0] */ int rc = SQLITE_OK; /* Return code */ + int bUpdateOrDelete = 0; /* A transaction must be open when this is called. */ - assert( pTab->ts.eState==1 ); + assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); assert( pVtab->zErrMsg==0 ); assert( nArg==1 || nArg==(2+pConfig->nCol+2) ); @@ -240983,6 +255066,11 @@ static int fts5UpdateMethod( || sqlite3_value_type(apVal[0])==SQLITE_NULL ); assert( pTab->p.pConfig->pzErrmsg==0 ); + if( pConfig->pgsz==0 ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) return rc; + } + pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; /* Put any active cursors into REQUIRE_SEEK state. */ @@ -240997,7 +255085,14 @@ static int fts5UpdateMethod( if( pConfig->eContent!=FTS5_CONTENT_NORMAL && 0==sqlite3_stricmp("delete", z) ){ - rc = fts5SpecialDelete(pTab, apVal); + if( pConfig->bContentlessDelete ){ + fts5SetVtabError(pTab, + "'delete' may not be used with a contentless_delete=1 table" + ); + rc = SQLITE_ERROR; + }else{ + rc = fts5SpecialDelete(pTab, apVal); + } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); } @@ -241014,7 +255109,7 @@ static int fts5UpdateMethod( ** Cases 3 and 4 may violate the rowid constraint. */ int eConflict = SQLITE_ABORT; - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL || pConfig->bContentlessDelete ){ eConflict = sqlite3_vtab_on_conflict(pConfig->db); } @@ -241022,8 +255117,12 @@ static int fts5UpdateMethod( assert( nArg!=1 || eType0==SQLITE_INTEGER ); /* Filter out attempts to run UPDATE or DELETE on contentless tables. 
- ** This is not suported. */ - if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){ + ** This is not suported. Except - they are both supported if the CREATE + ** VIRTUAL TABLE statement contained "contentless_delete=1". */ + if( eType0==SQLITE_INTEGER + && pConfig->eContent==FTS5_CONTENT_NONE + && pConfig->bContentlessDelete==0 + ){ pTab->p.base.zErrMsg = sqlite3_mprintf( "cannot %s contentless fts5 table: %s", (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName @@ -241035,6 +255134,7 @@ static int fts5UpdateMethod( else if( nArg==1 ){ i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); + bUpdateOrDelete = 1; } /* INSERT or UPDATE */ @@ -241046,10 +255146,12 @@ static int fts5UpdateMethod( } else if( eType0!=SQLITE_INTEGER ){ - /* If this is a REPLACE, first remove the current entry (if any) */ + /* An INSERT statement. If the conflict-mode is REPLACE, first remove + ** the current entry (if any). */ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + bUpdateOrDelete = 1; } fts5StorageInsert(&rc, pTab, apVal, pRowid); } @@ -241078,10 +255180,24 @@ static int fts5UpdateMethod( rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); fts5StorageInsert(&rc, pTab, apVal, pRowid); } + bUpdateOrDelete = 1; } } } + if( rc==SQLITE_OK + && bUpdateOrDelete + && pConfig->bSecureDelete + && pConfig->iVersion==FTS5_CURRENT_VERSION + ){ + rc = sqlite3Fts5StorageConfigValue( + pTab->pStorage, "version", 0, FTS5_CURRENT_VERSION_SECUREDELETE + ); + if( rc==SQLITE_OK ){ + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -241094,8 +255210,7 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_SYNC, 0); pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - fts5TripCursors(pTab); - rc = sqlite3Fts5StorageSync(pTab->pStorage); + rc = sqlite3Fts5FlushToDisk(&pTab->p); pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -241191,7 +255306,10 @@ static int fts5ApiColumnText( ){ int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; - if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) + Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + if( iCol<0 || iCol>=pTab->pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) || pCsr->ePlan==FTS5_PLAN_SPECIAL ){ *pz = 0; @@ -241216,8 +255334,9 @@ static int fts5CsrPoslist( int rc = SQLITE_OK; int bLive = (pCsr->pSorter==0); - if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ - + if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ + rc = SQLITE_RANGE; + }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; @@ -241241,15 +255360,21 @@ static int fts5CsrPoslist( CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST); } - if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ - Fts5Sorter *pSorter = pCsr->pSorter; - int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); - *pn = pSorter->aIdx[iPhrase] - i1; - *pa = &pSorter->aPoslist[i1]; + if( rc==SQLITE_OK ){ + if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ + Fts5Sorter *pSorter = pCsr->pSorter; + int i1 = (iPhrase==0 ? 
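The fts5UpdateMethod() tail above upgrades the on-disk version to FTS5_CURRENT_VERSION_SECUREDELETE on the first UPDATE or DELETE after secure-delete is enabled, so older library builds refuse to write the now-differently-maintained index. Enabling the option itself uses the standard FTS5 configuration convention; a usage sketch, with "ft" as an illustrative table name:

```c
#include "sqlite3.h"

/* After this, UPDATE/DELETE on "ft" will scrub old index entries, and
** the first such write bumps the on-disk format version. */
static int enable_secure_delete(sqlite3 *db){
  return sqlite3_exec(db,
      "INSERT INTO ft(ft, rank) VALUES('secure-delete', 1);", 0, 0, 0);
}
```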
0 : pSorter->aIdx[iPhrase-1]); + *pn = pSorter->aIdx[iPhrase] - i1; + *pa = &pSorter->aPoslist[i1]; + }else{ + *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + } }else{ - *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + *pa = 0; + *pn = 0; } + return rc; } @@ -241356,12 +255481,6 @@ static int fts5ApiInst( ){ if( iIdx<0 || iIdx>=pCsr->nInstCount ){ rc = SQLITE_RANGE; -#if 0 - }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){ - *piPhrase = pCsr->aInst[iIdx*3]; - *piCol = pCsr->aInst[iIdx*3 + 2]; - *piOff = -1; -#endif }else{ *piPhrase = pCsr->aInst[iIdx*3]; *piCol = pCsr->aInst[iIdx*3 + 1]; @@ -241616,13 +255735,56 @@ static int fts5ApiPhraseFirstColumn( return rc; } +/* +** xQueryToken() API implemenetation. +*/ +static int fts5ApiQueryToken( + Fts5Context* pCtx, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut); +} + +/* +** xInstToken() API implemenetation. +*/ +static int fts5ApiInstToken( + Fts5Context *pCtx, + int iIdx, + int iToken, + const char **ppOut, int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + int rc = SQLITE_OK; + if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0 + || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) + ){ + if( iIdx<0 || iIdx>=pCsr->nInstCount ){ + rc = SQLITE_RANGE; + }else{ + int iPhrase = pCsr->aInst[iIdx*3]; + int iCol = pCsr->aInst[iIdx*3 + 1]; + int iOff = pCsr->aInst[iIdx*3 + 2]; + i64 iRowid = fts5CursorRowid(pCsr); + rc = sqlite3Fts5ExprInstToken( + pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut + ); + } + } + return rc; +} + static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); static const Fts5ExtensionApi sFts5Api = { - 2, /* iVersion */ + 3, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -241642,6 +255804,8 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseNext, fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, + fts5ApiQueryToken, + fts5ApiInstToken }; /* @@ -241862,6 +256026,12 @@ static int fts5ColumnMethod( sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); } pConfig->pzErrmsg = 0; + }else if( pConfig->bContentlessDelete && sqlite3_vtab_nochange(pCtx) ){ + char *zErr = sqlite3_mprintf("cannot UPDATE a subset of " + "columns on fts5 contentless-delete table: %s", pConfig->zName + ); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); } return rc; } @@ -241900,8 +256070,10 @@ static int fts5RenameMethod( sqlite3_vtab *pVtab, /* Virtual table handle */ const char *zName /* New name of table */ ){ + int rc; Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - return sqlite3Fts5StorageRename(pTab->pStorage, zName); + rc = sqlite3Fts5StorageRename(pTab->pStorage, zName); + return rc; } static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ @@ -241915,9 +256087,15 @@ static int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ ** Flush the contents of the pending-terms table to disk. 
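fts5ApiQueryToken() and fts5ApiInstToken() above extend the extension API to iVersion 3. A sketch of an auxiliary function consuming xInstToken(); it reports the stored token behind the first match instance. The function name is invented, and its registration via xCreateFunction is omitted:

```c
#include "fts5.h"

static void show_first_inst_token(
  const Fts5ExtensionApi *pApi, Fts5Context *pFts,
  sqlite3_context *pCtx, int nVal, sqlite3_value **apVal
){
  int nInst = 0;
  (void)nVal; (void)apVal;
  if( pApi->iVersion>=3
   && pApi->xInstCount(pFts, &nInst)==SQLITE_OK
   && nInst>0
  ){
    const char *pTok = 0;
    int nTok = 0;
    if( pApi->xInstToken(pFts, 0, 0, &pTok, &nTok)==SQLITE_OK && pTok ){
      sqlite3_result_text(pCtx, pTok, nTok, SQLITE_TRANSIENT);
      return;
    }
  }
  sqlite3_result_null(pCtx);
}
```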
*/ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + + fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); + rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } + return rc; } /* @@ -241926,9 +256104,16 @@ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** This is a no-op. */ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint); + if( (iSavepoint+1)iSavepoint ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint; + } + } + return rc; } /* @@ -241938,10 +256123,14 @@ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ */ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ + int rc = SQLITE_OK; fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); - return sqlite3Fts5StorageRollback(pTab->pStorage); + if( (iSavepoint+1)<=pTab->iSavepoint ){ + pTab->p.pConfig->pgsz = 0; + rc = sqlite3Fts5StorageRollback(pTab->pStorage); + } + return rc; } /* @@ -242143,7 +256332,7 @@ static void fts5SourceIdFunc( ){ assert( nArg==0 ); UNUSED_PARAM2(nArg, apUnused); - sqlite3_result_text(pCtx, "fts5: 2022-09-29 15:55:41 a29f9949895322123f7c38fbe94c649a9d6e6c9cd0c3b41c96d694552f26b309", -1, SQLITE_TRANSIENT); + sqlite3_result_text(pCtx, "fts5: 2024-04-15 13:34:05 8653b758870e6ef0c98d46b3ace27849054af85da891eb121e9aaa537f1e8355", -1, SQLITE_TRANSIENT); } /* @@ -242161,9 +256350,40 @@ static int fts5ShadowName(const char *zName){ return 0; } +/* +** Run an integrity check on the FTS5 data structures. Return a string +** if anything is found amiss. Return a NULL pointer if everything is +** OK. 
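The savepoint methods above stop being unconditional flushes: iSavepoint records (plus one) the highest savepoint whose state was successfully flushed, so xRelease only writes when releasing below that level and xRollbackTo only discards work that was actually flushed. The bookkeeping in isolation, with stand-in stubs for the flush and rollback calls:

```c
typedef struct Tab { int iSavepoint; } Tab;

static int tab_flush(Tab *p){ (void)p; return 0; }    /* stand-in */
static int tab_rollback(Tab *p){ (void)p; return 0; } /* stand-in */

static int xSavepointSketch(Tab *p, int iSavepoint){
  int rc = tab_flush(p);
  if( rc==0 ) p->iSavepoint = iSavepoint+1;
  return rc;
}
static int xReleaseSketch(Tab *p, int iSavepoint){
  int rc = 0;
  if( (iSavepoint+1)<p->iSavepoint ){
    rc = tab_flush(p);
    if( rc==0 ) p->iSavepoint = iSavepoint;
  }
  return rc;
}
static int xRollbackToSketch(Tab *p, int iSavepoint){
  int rc = 0;
  if( (iSavepoint+1)<=p->iSavepoint ){
    rc = tab_rollback(p);   /* only undo work that was flushed */
  }
  return rc;
}
```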
+*/ +static int fts5IntegrityMethod( + sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */ + const char *zSchema, /* Name of schema in which this table lives */ + const char *zTabname, /* Name of the table itself */ + int isQuick, /* True if this is a quick-check */ + char **pzErr /* Write error message here */ +){ + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc; + + assert( pzErr!=0 && *pzErr==0 ); + UNUSED_PARAM(isQuick); + rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + }else if( rc!=SQLITE_OK ){ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, sqlite3_errstr(rc)); + } + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + + return SQLITE_OK; +} + static int fts5Init(sqlite3 *db){ static const sqlite3_module fts5Mod = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts5CreateMethod, /* xConnect */ fts5ConnectMethod, /* xBestIndex */ fts5BestIndexMethod, @@ -242186,7 +256406,8 @@ static int fts5Init(sqlite3 *db){ /* xSavepoint */ fts5SavepointMethod, /* xRelease */ fts5ReleaseMethod, /* xRollbackTo */ fts5RollbackToMethod, - /* xShadowName */ fts5ShadowName + /* xShadowName */ fts5ShadowName, + /* xIntegrity */ fts5IntegrityMethod }; int rc; @@ -242216,7 +256437,9 @@ static int fts5Init(sqlite3 *db){ } if( rc==SQLITE_OK ){ rc = sqlite3_create_function( - db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0 + db, "fts5_source_id", 0, + SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, + p, fts5SourceIdFunc, 0, 0 ); } } @@ -242354,10 +256577,10 @@ static int fts5StorageGetStmt( "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ "DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */ - "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */ + "REPLACE INTO %Q.'%q_docsize' VALUES(?,?%s)", /* REPLACE_DOCSIZE */ "DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */ - "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ + "SELECT sz%s FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ "REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */ "SELECT %s FROM %s AS T", /* SCAN */ @@ -242405,6 +256628,19 @@ static int fts5StorageGetStmt( break; } + case FTS5_STMT_REPLACE_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, + (pC->bContentlessDelete ? ",?" : "") + ); + break; + + case FTS5_STMT_LOOKUP_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], + (pC->bContentlessDelete ? 
",origin" : ""), + pC->zDb, pC->zName + ); + break; + default: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName); break; @@ -242594,9 +256830,11 @@ static int sqlite3Fts5StorageOpen( } if( rc==SQLITE_OK && pConfig->bColumnsize ){ - rc = sqlite3Fts5CreateTable( - pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr - ); + const char *zCols = "id INTEGER PRIMARY KEY, sz BLOB"; + if( pConfig->bContentlessDelete ){ + zCols = "id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER"; + } + rc = sqlite3Fts5CreateTable(pConfig, "docsize", zCols, 0, pzErr); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5CreateTable( @@ -242673,7 +256911,7 @@ static int fts5StorageDeleteFromIndex( ){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ - int rc; /* Return code */ + int rc = SQLITE_OK; /* Return code */ int rc2; /* sqlite3_reset() return code */ int iCol; Fts5InsertCtx ctx; @@ -242689,7 +256927,6 @@ static int fts5StorageDeleteFromIndex( ctx.pStorage = p; ctx.iCol = -1; - rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ const char *zText; @@ -242726,6 +256963,37 @@ static int fts5StorageDeleteFromIndex( return rc; } +/* +** This function is called to process a DELETE on a contentless_delete=1 +** table. It adds the tombstone required to delete the entry with rowid +** iDel. If successful, SQLITE_OK is returned. Or, if an error occurs, +** an SQLite error code. +*/ +static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ + i64 iOrigin = 0; + sqlite3_stmt *pLookup = 0; + int rc = SQLITE_OK; + + assert( p->pConfig->bContentlessDelete ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE ); + + /* Look up the origin of the document in the %_docsize table. Store + ** this in stack variable iOrigin. */ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pLookup, 1, iDel); + if( SQLITE_ROW==sqlite3_step(pLookup) ){ + iOrigin = sqlite3_column_int64(pLookup, 1); + } + rc = sqlite3_reset(pLookup); + } + + if( rc==SQLITE_OK && iOrigin!=0 ){ + rc = sqlite3Fts5IndexContentlessDelete(p->pIndex, iOrigin, iDel); + } + + return rc; +} /* ** Insert a record into the %_docsize table. 
Specifically, do: @@ -242746,10 +257014,17 @@ static int fts5StorageInsertDocsize( rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pReplace, 1, iRowid); - sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); - sqlite3_step(pReplace); - rc = sqlite3_reset(pReplace); - sqlite3_bind_null(pReplace, 2); + if( p->pConfig->bContentlessDelete ){ + i64 iOrigin = 0; + rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); + sqlite3_bind_int64(pReplace, 3, iOrigin); + } + if( rc==SQLITE_OK ){ + sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); + sqlite3_step(pReplace); + rc = sqlite3_reset(pReplace); + sqlite3_bind_null(pReplace, 2); + } } } return rc; @@ -242813,7 +257088,15 @@ static int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **ap /* Delete the index records */ if( rc==SQLITE_OK ){ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); + } + + if( rc==SQLITE_OK ){ + if( p->pConfig->bContentlessDelete ){ + rc = fts5StorageContentlessDelete(p, iDel); + }else{ + rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + } } /* Delete the %_docsize record */ @@ -242890,7 +257173,7 @@ static int sqlite3Fts5StorageRebuild(Fts5Storage *p){ } if( rc==SQLITE_OK ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); + rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){ @@ -243401,7 +257684,9 @@ static int sqlite3Fts5StorageSync(Fts5Storage *p){ i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db); if( p->bTotalsValid ){ rc = fts5StorageSaveTotals(p); - p->bTotalsValid = 0; + if( rc==SQLITE_OK ){ + p->bTotalsValid = 0; + } } if( rc==SQLITE_OK ){ rc = sqlite3Fts5IndexSync(p->pIndex); @@ -243675,6 +257960,12 @@ static const unsigned char sqlite3Utf8Trans1[] = { #endif /* ifndef SQLITE_AMALGAMATION */ +#define FTS5_SKIP_UTF8(zIn) { \ + if( ((unsigned char)(*(zIn++)))>=0xc0 ){ \ + while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \ + } \ +} + typedef struct Unicode61Tokenizer Unicode61Tokenizer; struct Unicode61Tokenizer { unsigned char aTokenChar[128]; /* ASCII range token characters */ @@ -244710,6 +259001,7 @@ static int fts5PorterTokenize( typedef struct TrigramTokenizer TrigramTokenizer; struct TrigramTokenizer { int bFold; /* True to fold to lower-case */ + int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */ }; /* @@ -244736,6 +259028,7 @@ static int fts5TriCreate( }else{ int i; pNew->bFold = 1; + pNew->iFoldParam = 0; for(i=0; rc==SQLITE_OK && i<nArg; i+=2){ const char *zArg = azArg[i+1]; if( 0==sqlite3_stricmp(azArg[i], "case_sensitive") ){ if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1] ){ rc = SQLITE_ERROR; }else{ pNew->bFold = (zArg[0]=='0'); } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ?
2 : 0; + } }else{ rc = SQLITE_ERROR; } } } + + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ + rc = SQLITE_ERROR; + } + if( rc!=SQLITE_OK ){ fts5TriDelete((Fts5Tokenizer*)pNew); pNew = 0; @@ -244770,40 +259074,62 @@ static int fts5TriTokenize( TrigramTokenizer *p = (TrigramTokenizer*)pTok; int rc = SQLITE_OK; char aBuf[32]; + char *zOut = aBuf; + int ii; const unsigned char *zIn = (const unsigned char*)pText; const unsigned char *zEof = &zIn[nText]; u32 iCode; + int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); - while( 1 ){ - char *zOut = aBuf; - int iStart = zIn - (const unsigned char*)pText; - const unsigned char *zNext; - - READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - zNext = zIn; - if( zIn<zEof ){ - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + + /* Populate aBuf[] with the characters for the first trigram. */ + for(ii=0; ii<3; ii++){ + do { + aStart[ii] = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - }else{ - break; - } - if( zIn<zEof ){ - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + if( iCode==0 ) return SQLITE_OK; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + WRITE_UTF8(zOut, iCode); + } + + /* At the start of each iteration of this loop: + ** + ** aBuf: Contains 3 characters. The 3 characters of the next trigram. + ** zOut: Points to the byte following the last character in aBuf. + ** aStart[3]: Contains the byte offset in the input text corresponding + ** to the start of each of the three characters in the buffer. + */ + assert( zIn<=zEof ); + while( 1 ){ + int iNext; /* Start of character following current tri */ + const char *z1; + + /* Read characters from the input up until the first non-diacritic */ + do { + iNext = zIn - (const unsigned char*)pText; READ_UTF8(zIn, zEof, iCode); if( iCode==0 ) break; - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); - }else{ - break; - } - rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf); - if( rc!=SQLITE_OK ) break; - zIn = zNext; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + + /* Pass the current trigram back to fts5 */ + rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext); + if( iCode==0 || rc!=SQLITE_OK ) break; + + /* Remove the first character from buffer aBuf[]. Append the character + ** with codepoint iCode. */ + z1 = aBuf; + FTS5_SKIP_UTF8(z1); + memmove(aBuf, z1, zOut - z1); + zOut -= (z1 - aBuf); + WRITE_UTF8(zOut, iCode); + + /* Update the aStart[] array */ + aStart[0] = aStart[1]; + aStart[1] = aStart[2]; + aStart[2] = iNext; } return rc; @@ -244826,7 +259152,9 @@ static int sqlite3Fts5TokenizerPattern( ){ if( xCreate==fts5TriCreate ){ TrigramTokenizer *p = (TrigramTokenizer*)pTok; - return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + if( p->iFoldParam==0 ){ + return p->bFold ?
FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + } } return FTS5_PATTERN_NONE; } @@ -246615,7 +260943,7 @@ static int fts5VocabFilterMethod( if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); nTerm = sqlite3_value_bytes(pEq); - f = 0; + f = FTS5INDEX_QUERY_NOTOKENDATA; }else{ if( pGe ){ zTerm = (const char *)sqlite3_value_text(pGe); @@ -246769,7 +261097,8 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ 0, - /* xShadowName */ 0 + /* xShadowName */ 0, + /* xIntegrity */ 0 }; void *p = (void*)pGlobal; @@ -246881,6 +261210,10 @@ static int stmtConnect( #define STMT_COLUMN_MEM 10 /* SQLITE_STMTSTATUS_MEMUSED */ + (void)pAux; + (void)argc; + (void)argv; + (void)pzErr; rc = sqlite3_declare_vtab(db, "CREATE TABLE x(sql,ncol,ro,busy,nscan,nsort,naidx,nstep," "reprep,run,mem)"); @@ -247000,6 +261333,10 @@ static int stmtFilter( sqlite3_int64 iRowid = 1; StmtRow **ppRow = 0; + (void)idxNum; + (void)idxStr; + (void)argc; + (void)argv; stmtCsrReset(pCur); ppRow = &pCur->pRow; for(p=sqlite3_next_stmt(pCur->db, 0); p; p=sqlite3_next_stmt(pCur->db, p)){ @@ -247055,6 +261392,7 @@ static int stmtBestIndex( sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo ){ + (void)tab; pIdxInfo->estimatedCost = (double)500; pIdxInfo->estimatedRows = 500; return SQLITE_OK; @@ -247089,6 +261427,7 @@ static sqlite3_module stmtModule = { 0, /* xRelease */ 0, /* xRollbackTo */ 0, /* xShadowName */ + 0 /* xIntegrity */ }; #endif /* SQLITE_OMIT_VIRTUALTABLE */ diff --git a/Support/GRDBDeploymentTarget.xcconfig b/Support/GRDBDeploymentTarget.xcconfig index a3458605c9..753467b40d 100644 --- a/Support/GRDBDeploymentTarget.xcconfig +++ b/Support/GRDBDeploymentTarget.xcconfig @@ -2,10 +2,11 @@ IPHONEOS_DEPLOYMENT_TARGET = 11.0 MACOSX_DEPLOYMENT_TARGET = 10.13 TVOS_DEPLOYMENT_TARGET = 11.0 WATCHOS_DEPLOYMENT_TARGET = 4.0 +OTHER_SWIFT_FLAGS = $(inherited) -D SQLITE_ENABLE_FTS5 //// Compile with all opt-in APIs //GCC_PREPROCESSOR_DEFINITIONS = $(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1 -//OTHER_SWIFT_FLAGS = $(inherited) -D SQLITE_ENABLE_FTS5 -D SQLITE_ENABLE_PREUPDATE_HOOK +//OTHER_SWIFT_FLAGS = $(inherited) -D SQLITE_ENABLE_PREUPDATE_HOOK // Enable concurrency warning in Swift 5.5 // https://twitter.com/olebegemann/status/1421144304127463427?lang=en diff --git a/Support/Info.plist b/Support/Info.plist index 71d5879672..43d72a7017 100644 --- a/Support/Info.plist +++ b/Support/Info.plist @@ -15,7 +15,7 @@ <key>CFBundlePackageType</key> <string>FMWK</string> <key>CFBundleShortVersionString</key> - <string>6.6.0</string> + <string>6.27.0</string> <key>CFBundleSignature</key> <string>????</string> <key>CFBundleVersion</key> diff --git a/Tests/CocoaPods/GRDBiOS-framework/Podfile b/Tests/CocoaPods/GRDBiOS-framework/Podfile index 098cd81685..8a4c9e93a9 100644 --- a/Tests/CocoaPods/GRDBiOS-framework/Podfile +++ b/Tests/CocoaPods/GRDBiOS-framework/Podfile @@ -7,9 +7,26 @@ post_install do |installer| installer.pods_project.targets.select { |target| target.name == "GRDB.swift" }.each do |target| target.build_configurations.each do |config| # Enable extra GRDB APIs - config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_FTS5 -D SQLITE_ENABLE_PREUPDATE_HOOK" + config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_PREUPDATE_HOOK" # Enable extra SQLite APIs config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = "$(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1" end end + + # TODO: remove when https://github.com/CocoaPods/CocoaPods/pull/12009 is merged.
+ # https://github.com/CocoaPods/CocoaPods/issues/12012#issuecomment-1655191516 + installer.aggregate_targets.each do |target| + target.xcconfigs.each do |variant, xcconfig| + xcconfig_path = target.client_root + target.xcconfig_relative_path(variant) + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + installer.pods_project.targets.each do |target| + target.build_configurations.each do |config| + if config.base_configuration_reference.is_a? Xcodeproj::Project::Object::PBXFileReference + xcconfig_path = config.base_configuration_reference.real_path + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + end end diff --git a/Tests/CocoaPods/GRDBiOS-static/Podfile b/Tests/CocoaPods/GRDBiOS-static/Podfile index 351d375649..94614c25b1 100644 --- a/Tests/CocoaPods/GRDBiOS-static/Podfile +++ b/Tests/CocoaPods/GRDBiOS-static/Podfile @@ -6,9 +6,26 @@ post_install do |installer| installer.pods_project.targets.select { |target| target.name == "GRDB.swift" }.each do |target| target.build_configurations.each do |config| # Enable extra GRDB APIs - config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_FTS5 -D SQLITE_ENABLE_PREUPDATE_HOOK" + config.build_settings['OTHER_SWIFT_FLAGS'] = "$(inherited) -D SQLITE_ENABLE_PREUPDATE_HOOK" # Enable extra SQLite APIs config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] = "$(inherited) GRDB_SQLITE_ENABLE_PREUPDATE_HOOK=1" end end + + # TODO: remove when https://github.com/CocoaPods/CocoaPods/pull/12009 is merged. + # https://github.com/CocoaPods/CocoaPods/issues/12012#issuecomment-1655191516 + installer.aggregate_targets.each do |target| + target.xcconfigs.each do |variant, xcconfig| + xcconfig_path = target.client_root + target.xcconfig_relative_path(variant) + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + installer.pods_project.targets.each do |target| + target.build_configurations.each do |config| + if config.base_configuration_reference.is_a? 
Xcodeproj::Project::Object::PBXFileReference + xcconfig_path = config.base_configuration_reference.real_path + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + end end diff --git a/Tests/CocoaPods/SQLCipher3/GRDBTests.xcodeproj/project.pbxproj b/Tests/CocoaPods/SQLCipher3/GRDBTests.xcodeproj/project.pbxproj index c3b31290c3..2869b204eb 100644 --- a/Tests/CocoaPods/SQLCipher3/GRDBTests.xcodeproj/project.pbxproj +++ b/Tests/CocoaPods/SQLCipher3/GRDBTests.xcodeproj/project.pbxproj @@ -3,10 +3,24 @@ archiveVersion = 1; classes = { }; - objectVersion = 51; + objectVersion = 54; objects = { /* Begin PBXBuildFile section */ + 5603CECF2AC8636E00CF097D /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CECE2AC8636E00CF097D /* JSONExpressionsTests.swift */; }; + 5603CED02AC8636E00CF097D /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5603CECE2AC8636E00CF097D /* JSONExpressionsTests.swift */; }; + 56071DE52BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56071DE42BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift */; }; + 56071DE62BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56071DE42BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift */; }; + 561F38DD2AC891710051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38DC2AC891710051EEE9 /* JSONColumnTests.swift */; }; + 561F38DE2AC891710051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38DC2AC891710051EEE9 /* JSONColumnTests.swift */; }; + 561F38F92AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F72AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; + 561F38FA2AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F72AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; + 561F38FB2AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F82AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 561F38FC2AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38F82AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 5623B61D2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B61B2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; + 5623B61E2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B61B2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; + 5623B61F2AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B61C2AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; + 5623B6202AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B61C2AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; 56419D6724A54062004967E1 /* DatabasePoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419C9C24A54053004967E1 /* DatabasePoolTests.swift */; }; 56419D6824A54062004967E1 /* DatabasePoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419C9C24A54053004967E1 /* DatabasePoolTests.swift */; }; 56419D6924A54062004967E1 /* 
ResultCodeTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419C9D24A54053004967E1 /* ResultCodeTests.swift */; }; @@ -443,6 +457,40 @@ 564A2151226B8E18001F64F1 /* Betty.jpeg in Resources */ = {isa = PBXBuildFile; fileRef = 564A1F6F226B89D6001F64F1 /* Betty.jpeg */; }; 565A27CC27871FFF00659A62 /* BackupTestCase.swift in Sources */ = {isa = PBXBuildFile; fileRef = 565A27CB27871FFF00659A62 /* BackupTestCase.swift */; }; 565A27CD27871FFF00659A62 /* BackupTestCase.swift in Sources */ = {isa = PBXBuildFile; fileRef = 565A27CB27871FFF00659A62 /* BackupTestCase.swift */; }; + 567B23172A29BFA400C61174 /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 567B23162A29BFA400C61174 /* Issue1383.sqlite */; }; + 567B23182A29BFA500C61174 /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 567B23162A29BFA400C61174 /* Issue1383.sqlite */; }; + 567B5C292AD32A2D00629622 /* CommonTableExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1C2AD32A2D00629622 /* CommonTableExpressionTests.swift */; }; + 567B5C2A2AD32A2D00629622 /* CommonTableExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1C2AD32A2D00629622 /* CommonTableExpressionTests.swift */; }; + 567B5C2B2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1D2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift */; }; + 567B5C2C2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1D2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift */; }; + 567B5C2D2AD32A2D00629622 /* SingletonRecordTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1E2AD32A2D00629622 /* SingletonRecordTest.swift */; }; + 567B5C2E2AD32A2D00629622 /* SingletonRecordTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1E2AD32A2D00629622 /* SingletonRecordTest.swift */; }; + 567B5C2F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift */; }; + 567B5C302AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C1F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift */; }; + 567B5C312AD32A2D00629622 /* TableTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C202AD32A2D00629622 /* TableTests.swift */; }; + 567B5C322AD32A2D00629622 /* TableTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C202AD32A2D00629622 /* TableTests.swift */; }; + 567B5C332AD32A2D00629622 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C212AD32A2D00629622 /* TransactionDateTests.swift */; }; + 567B5C342AD32A2D00629622 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C212AD32A2D00629622 /* TransactionDateTests.swift */; }; + 567B5C352AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C222AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift */; }; + 567B5C362AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C222AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift */; }; + 567B5C372AD32A2D00629622 /* SQLExpressionIsConstantTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C232AD32A2D00629622 /* SQLExpressionIsConstantTests.swift */; }; + 567B5C382AD32A2D00629622 /* SQLExpressionIsConstantTests.swift in Sources */ = 
{isa = PBXBuildFile; fileRef = 567B5C232AD32A2D00629622 /* SQLExpressionIsConstantTests.swift */; }; + 567B5C392AD32A2D00629622 /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C242AD32A2D00629622 /* FoundationDecimalTests.swift */; }; + 567B5C3A2AD32A2D00629622 /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C242AD32A2D00629622 /* FoundationDecimalTests.swift */; }; + 567B5C3B2AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C252AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift */; }; + 567B5C3C2AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C252AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift */; }; + 567B5C3D2AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C262AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */; }; + 567B5C3E2AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C262AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */; }; + 567B5C3F2AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C272AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift */; }; + 567B5C402AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C272AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift */; }; + 567B5C412AD32A2D00629622 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C282AD32A2D00629622 /* SharedValueObservationTests.swift */; }; + 567B5C422AD32A2D00629622 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C282AD32A2D00629622 /* SharedValueObservationTests.swift */; }; + 567B5C4B2AD32F7000629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C492AD32F7000629622 /* DatabaseDumpTests.swift */; }; + 567B5C4C2AD32F7000629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C492AD32F7000629622 /* DatabaseDumpTests.swift */; }; + 567B5C4D2AD32F7000629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C4A2AD32F7000629622 /* DatabaseReaderDumpTests.swift */; }; + 567B5C4E2AD32F7000629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C4A2AD32F7000629622 /* DatabaseReaderDumpTests.swift */; }; + 568C3F7F2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F7E2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift */; }; + 568C3F802A5AB36900A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F7E2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift */; }; 5691D97527257CE40021D540 /* AvailableElements.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5691D97427257CE40021D540 /* AvailableElements.swift */; }; 5691D97627257CE40021D540 /* AvailableElements.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5691D97427257CE40021D540 /* AvailableElements.swift */; }; 56F61DF0283D484700AF9884 /* getThreadsCount.c in Sources */ = {isa = PBXBuildFile; fileRef = 56F61DEE283D484700AF9884 /* getThreadsCount.c */; }; @@ -455,6 +503,13 @@ /* Begin PBXFileReference section */ 
04298D834C818285823558AB /* Pods-GRDBTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GRDBTests.release.xcconfig"; path = "Target Support Files/Pods-GRDBTests/Pods-GRDBTests.release.xcconfig"; sourceTree = "<group>"; }; 47C5D1B9AFFE795AA1D6EA5D /* Pods-GRDBTestsEncrypted.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GRDBTestsEncrypted.release.xcconfig"; path = "Target Support Files/Pods-GRDBTestsEncrypted/Pods-GRDBTestsEncrypted.release.xcconfig"; sourceTree = "<group>"; }; + 5603CECE2AC8636E00CF097D /* JSONExpressionsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONExpressionsTests.swift; sourceTree = "<group>"; }; + 56071DE42BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SingletonUserDefaultsTest.swift; sourceTree = "<group>"; }; + 561F38DC2AC891710051EEE9 /* JSONColumnTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONColumnTests.swift; sourceTree = "<group>"; }; + 561F38F72AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataDecodingStrategyTests.swift; sourceTree = "<group>"; }; + 561F38F82AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataEncodingStrategyTests.swift; sourceTree = "<group>"; }; + 5623B61B2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueTemporaryCopyTests.swift; sourceTree = "<group>"; }; + 5623B61C2AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueInMemoryCopyTests.swift; sourceTree = "<group>"; }; 56419C9C24A54053004967E1 /* DatabasePoolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabasePoolTests.swift; sourceTree = "<group>"; }; 56419C9D24A54053004967E1 /* ResultCodeTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ResultCodeTests.swift; sourceTree = "<group>"; }; 56419C9E24A54053004967E1 /* DatabaseQueueTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueTests.swift; sourceTree = "<group>"; }; @@ -676,6 +731,23 @@ 564A1F6F226B89D6001F64F1 /* Betty.jpeg */ = {isa = PBXFileReference; lastKnownFileType = image.jpeg; path = Betty.jpeg; sourceTree = "<group>"; }; 564A2156226B8E18001F64F1 /* GRDBTestsEncrypted.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GRDBTestsEncrypted.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; 565A27CB27871FFF00659A62 /* BackupTestCase.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = BackupTestCase.swift; sourceTree = "<group>"; }; + 567B23162A29BFA400C61174 /* Issue1383.sqlite */ = {isa = PBXFileReference; lastKnownFileType = file; path = Issue1383.sqlite; sourceTree = "<group>"; }; + 567B5C1C2AD32A2D00629622 /* CommonTableExpressionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CommonTableExpressionTests.swift;
sourceTree = ""; }; + 567B5C1D2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLIdentifyingColumnsTests.swift; sourceTree = ""; }; + 567B5C1E2AD32A2D00629622 /* SingletonRecordTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SingletonRecordTest.swift; sourceTree = ""; }; + 567B5C1F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseColumnEncodingStrategyTests.swift; sourceTree = ""; }; + 567B5C202AD32A2D00629622 /* TableTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableTests.swift; sourceTree = ""; }; + 567B5C212AD32A2D00629622 /* TransactionDateTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TransactionDateTests.swift; sourceTree = ""; }; + 567B5C222AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseSnapshotPoolTests.swift; sourceTree = ""; }; + 567B5C232AD32A2D00629622 /* SQLExpressionIsConstantTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLExpressionIsConstantTests.swift; sourceTree = ""; }; + 567B5C242AD32A2D00629622 /* FoundationDecimalTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoundationDecimalTests.swift; sourceTree = ""; }; + 567B5C252AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingRelationTests.swift; sourceTree = ""; }; + 567B5C262AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordMinimalNonOptionalPrimaryKeySingleTests.swift; sourceTree = ""; }; + 567B5C272AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CaseInsensitiveIdentifierTests.swift; sourceTree = ""; }; + 567B5C282AD32A2D00629622 /* SharedValueObservationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SharedValueObservationTests.swift; sourceTree = ""; }; + 567B5C492AD32F7000629622 /* DatabaseDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDumpTests.swift; sourceTree = ""; }; + 567B5C4A2AD32F7000629622 /* DatabaseReaderDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseReaderDumpTests.swift; sourceTree = ""; }; + 568C3F7E2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinitionTests.swift; sourceTree = ""; }; 5691D97427257CE40021D540 /* AvailableElements.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AvailableElements.swift; sourceTree = ""; }; 56F61DEC283D484700AF9884 /* GRDBTests-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "GRDBTests-Bridging-Header.h"; sourceTree = ""; }; 
56F61DEE283D484700AF9884 /* getThreadsCount.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = getThreadsCount.c; sourceTree = "<group>"; }; @@ -775,6 +847,7 @@ 56F61DEC283D484700AF9884 /* GRDBTests-Bridging-Header.h */, 564A1F6F226B89D6001F64F1 /* Betty.jpeg */, 56419CDA24A54058004967E1 /* InflectionsTests.json */, + 567B23162A29BFA400C61174 /* Issue1383.sqlite */, 56419CE724A54058004967E1 /* AnyCursorTests.swift */, 56419D2324A5405D004967E1 /* AssociationAggregateTests.swift */, 56419CC124A54056004967E1 /* AssociationBelongsToDecodableRecordTests.swift */, @@ -803,14 +876,17 @@ 56419D2C24A5405E004967E1 /* AssociationPrefetchingCodableRecordTests.swift */, 56419CC424A54056004967E1 /* AssociationPrefetchingFetchableRecordTests.swift */, 56419D2024A5405D004967E1 /* AssociationPrefetchingObservationTests.swift */, + 567B5C252AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift */, 56419CBA24A54055004967E1 /* AssociationPrefetchingRowTests.swift */, 56419CAD24A54054004967E1 /* AssociationPrefetchingSQLTests.swift */, 56419D5824A54061004967E1 /* AssociationRowScopeSearchTests.swift */, 56419D6324A54062004967E1 /* AssociationTableAliasTestsSQLTests.swift */, 565A27CB27871FFF00659A62 /* BackupTestCase.swift */, + 567B5C272AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift */, 56419CD424A54057004967E1 /* CGFloatTests.swift */, 56419CD924A54058004967E1 /* ColumnExpressionTests.swift */, 56419D5424A54061004967E1 /* ColumnInfoTests.swift */, + 567B5C1C2AD32A2D00629622 /* CommonTableExpressionTests.swift */, 56419D1924A5405C004967E1 /* CompilationProtocolTests.swift */, 56419CB824A54055004967E1 /* CompilationSubClassTests.swift */, 56419D3C24A5405F004967E1 /* CursorTests.swift */, @@ -818,10 +894,14 @@ 56419CCC24A54057004967E1 /* DatabaseAfterNextTransactionCommitTests.swift */, 56419D1724A5405C004967E1 /* DatabaseAggregateTests.swift */, 56419CE824A54059004967E1 /* DatabaseCollationTests.swift */, + 567B5C1F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift */, 56419D1124A5405C004967E1 /* DatabaseConfigurationTests.swift */, 56419CA424A54054004967E1 /* DatabaseCursorTests.swift */, + 561F38F72AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift */, + 561F38F82AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift */, 56419CCE24A54057004967E1 /* DatabaseDateDecodingStrategyTests.swift */, 56419CE424A54058004967E1 /* DatabaseDateEncodingStrategyTests.swift */, + 567B5C492AD32F7000629622 /* DatabaseDumpTests.swift */, 56419CAF24A54054004967E1 /* DatabaseErrorTests.swift */, 56419CB124A54054004967E1 /* DatabaseFunctionTests.swift */, 56419D0D24A5405B004967E1 /* DatabaseLogErrorTests.swift */, @@ -836,15 +916,19 @@ 56419C9C24A54053004967E1 /* DatabasePoolTests.swift */, 56419D4924A54060004967E1 /* DatabaseQueueBackupTests.swift */, 56419D2824A5405D004967E1 /* DatabaseQueueConcurrencyTests.swift */, + 5623B61C2AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift */, 56419CA024A54054004967E1 /* DatabaseQueueInMemoryTests.swift */, 56419CCF24A54057004967E1 /* DatabaseQueueReadOnlyTests.swift */, 56419D0A24A5405B004967E1 /* DatabaseQueueReleaseMemoryTests.swift */, 56419CEF24A54059004967E1 /* DatabaseQueueSchemaCacheTests.swift */, + 5623B61B2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift */, 56419C9E24A54053004967E1 /* DatabaseQueueTests.swift */, + 567B5C4A2AD32F7000629622 /* DatabaseReaderDumpTests.swift */, 56419D2924A5405D004967E1 /* DatabaseReaderTests.swift */, 56419CF724A5405A004967E1 /*
DatabaseRegionObservationTests.swift */, 56419D2624A5405D004967E1 /* DatabaseRegionTests.swift */, 56419CB324A54055004967E1 /* DatabaseSavepointTests.swift */, + 567B5C222AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift */, 56419CE624A54058004967E1 /* DatabaseSnapshotTests.swift */, 56419CD624A54057004967E1 /* DatabaseSuspensionTests.swift */, 56419D2124A5405D004967E1 /* DatabaseTests.swift */, @@ -873,10 +957,12 @@ 56419CB724A54055004967E1 /* FetchRequestTests.swift */, 56419D5B24A54061004967E1 /* FilterCursorTests.swift */, 56419D4B24A54060004967E1 /* FlattenCursorTests.swift */, + 568C3F7E2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift */, 56419CF124A54059004967E1 /* ForeignKeyInfoTests.swift */, 56419D2E24A5405E004967E1 /* FoundationDataTests.swift */, 56419D6624A54062004967E1 /* FoundationDateComponentsTests.swift */, 56419D3424A5405E004967E1 /* FoundationDateTests.swift */, + 567B5C242AD32A2D00629622 /* FoundationDecimalTests.swift */, 56419D6224A54062004967E1 /* FoundationNSDataTests.swift */, 56419CC924A54056004967E1 /* FoundationNSDateTests.swift */, 56419D0F24A5405B004967E1 /* FoundationNSDecimalNumberTests.swift */, @@ -903,6 +989,8 @@ 56419CDE24A54058004967E1 /* IndexInfoTests.swift */, 56419CF024A54059004967E1 /* InflectionsTests.swift */, 56419D0024A5405A004967E1 /* JoinSupportTests.swift */, + 561F38DC2AC891710051EEE9 /* JSONColumnTests.swift */, + 5603CECE2AC8636E00CF097D /* JSONExpressionsTests.swift */, 56419CDC24A54058004967E1 /* MapCursorTests.swift */, 56419D0224A5405A004967E1 /* MutablePersistableRecordChangesTests.swift */, 56419D2F24A5405E004967E1 /* MutablePersistableRecordEncodableTests.swift */, @@ -923,6 +1011,7 @@ 56419C9F24A54054004967E1 /* Record+QueryInterfaceRequestTests.swift */, 56419D0C24A5405B004967E1 /* RecordEditedTests.swift */, 56419CAE24A54054004967E1 /* RecordInitializersTests.swift */, + 567B5C262AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */, 56419D4224A5405F004967E1 /* RecordMinimalPrimaryKeyRowIDTests.swift */, 56419D0824A5405B004967E1 /* RecordMinimalPrimaryKeySingleTests.swift */, 56419D5E24A54061004967E1 /* RecordPersistenceConflictPolicy.swift */, @@ -946,7 +1035,12 @@ 56419D0E24A5405B004967E1 /* RowTestCase.swift */, 56419CEA24A54059004967E1 /* SchedulingWatchdogTests.swift */, 56419CFA24A5405A004967E1 /* SelectStatementTests.swift */, + 567B5C282AD32A2D00629622 /* SharedValueObservationTests.swift */, + 567B5C1E2AD32A2D00629622 /* SingletonRecordTest.swift */, + 56071DE42BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift */, + 567B5C232AD32A2D00629622 /* SQLExpressionIsConstantTests.swift */, 56419CCA24A54056004967E1 /* SQLExpressionLiteralTests.swift */, + 567B5C1D2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift */, 56419D0124A5405A004967E1 /* SQLLiteralTests.swift */, 56419D0B24A5405B004967E1 /* SQLRequestTests.swift */, 56419D4324A5405F004967E1 /* StatementArguments+FoundationTests.swift */, @@ -957,6 +1051,8 @@ 56419D3A24A5405F004967E1 /* TableRecordDeleteTests.swift */, 56419CA224A54054004967E1 /* TableRecordTests.swift */, 56419CD224A54057004967E1 /* TableRecordUpdateTests.swift */, + 567B5C202AD32A2D00629622 /* TableTests.swift */, + 567B5C212AD32A2D00629622 /* TransactionDateTests.swift */, 56419CB624A54055004967E1 /* TransactionObserverSavepointsTests.swift */, 56419D0924A5405B004967E1 /* TransactionObserverTests.swift */, 56419D2224A5405D004967E1 /* TruncateOptimizationTests.swift */, @@ -1086,6 +1182,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 
567B23172A29BFA400C61174 /* Issue1383.sqlite in Resources */, 56419DE324A54062004967E1 /* InflectionsTests.json in Resources */, 564A2026226B89E1001F64F1 /* Betty.jpeg in Resources */, ); @@ -1095,6 +1192,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 567B23182A29BFA500C61174 /* Issue1383.sqlite in Resources */, 56419DE424A54062004967E1 /* InflectionsTests.json in Resources */, 564A2151226B8E18001F64F1 /* Betty.jpeg in Resources */, ); @@ -1192,8 +1290,10 @@ 56419E0724A54062004967E1 /* DatabasePoolBackupTests.swift in Sources */, 56419DAF24A54062004967E1 /* DatabaseMigratorTests.swift in Sources */, 56419E7924A54062004967E1 /* RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift in Sources */, + 5623B61D2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 56419EA124A54063004967E1 /* QueryInterfaceExtensibilityTests.swift in Sources */, 56419D8724A54062004967E1 /* RecordPrimaryKeyHiddenRowIDTests.swift in Sources */, + 567B5C352AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift in Sources */, 56419DC924A54062004967E1 /* AssociationChainSQLTests.swift in Sources */, 56419D8D24A54062004967E1 /* DatabaseErrorTests.swift in Sources */, 56419DEB24A54062004967E1 /* IndexInfoTests.swift in Sources */, @@ -1202,9 +1302,13 @@ 56419EA524A54063004967E1 /* MutablePersistableRecordTests.swift in Sources */, 56419DC124A54062004967E1 /* FoundationNSDateTests.swift in Sources */, 56419EC524A54063004967E1 /* FlattenCursorTests.swift in Sources */, + 567B5C3D2AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */, 56419E9D24A54063004967E1 /* StatementArgumentsTests.swift in Sources */, + 568C3F7F2A5AB36900A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 5641A1AA24A540D6004967E1 /* Recorder.swift in Sources */, 56419D7B24A54062004967E1 /* FetchableRecord+QueryInterfaceRequestTests.swift in Sources */, + 567B5C4B2AD32F7000629622 /* DatabaseDumpTests.swift in Sources */, + 567B5C2D2AD32A2D00629622 /* SingletonRecordTest.swift in Sources */, 56419D6B24A54062004967E1 /* DatabaseQueueTests.swift in Sources */, 56419E2324A54062004967E1 /* SelectStatementTests.swift in Sources */, 56419E4B24A54062004967E1 /* RowTestCase.swift in Sources */, @@ -1215,6 +1319,7 @@ 56419DF324A54062004967E1 /* AssociationBelongsToSQLDerivationTests.swift in Sources */, 56419E6724A54062004967E1 /* AssociationHasOneThroughDecodableRecordTests.swift in Sources */, 56419E4D24A54062004967E1 /* FoundationNSDecimalNumberTests.swift in Sources */, + 561F38F92AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */, 56419E6B24A54062004967E1 /* ValueObservationFetchTests.swift in Sources */, 56419D8524A54062004967E1 /* FTS4RecordTests.swift in Sources */, 5641A1B224A540D6004967E1 /* Next.swift in Sources */, @@ -1246,21 +1351,26 @@ 56419ED924A54063004967E1 /* AssociationHasOneSQLTests.swift in Sources */, 56419EAB24A54063004967E1 /* DatabaseValueTests.swift in Sources */, 56419D7724A54062004967E1 /* DatabaseCursorTests.swift in Sources */, + 5603CECF2AC8636E00CF097D /* JSONExpressionsTests.swift in Sources */, 56419E1524A54062004967E1 /* AssociationChainRowScopesTests.swift in Sources */, + 567B5C332AD32A2D00629622 /* TransactionDateTests.swift in Sources */, 56F61DF0283D484700AF9884 /* getThreadsCount.c in Sources */, 56419EC724A54063004967E1 /* RecordPrimaryKeyMultipleTests.swift in Sources */, 56419DF724A54062004967E1 /* DatabaseDateEncodingStrategyTests.swift in Sources */, + 5623B61F2AED39F700436239 /* 
DatabaseQueueInMemoryCopyTests.swift in Sources */, 56419EB724A54063004967E1 /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */, 56419E7524A54062004967E1 /* AssociationAggregateTests.swift in Sources */, 56419DD924A54062004967E1 /* AssociationBelongsToSQLTests.swift in Sources */, 56419DA524A54062004967E1 /* QueryInterfacePromiseTests.swift in Sources */, 56419EE324A54063004967E1 /* Row+FoundationTests.swift in Sources */, 56419E7124A54062004967E1 /* DatabaseTests.swift in Sources */, + 561F38DD2AC891710051EEE9 /* JSONColumnTests.swift in Sources */, 56419E9524A54063004967E1 /* PrefixWhileCursorTests.swift in Sources */, 56419E2F24A54062004967E1 /* JoinSupportTests.swift in Sources */, 56419E0524A54062004967E1 /* AssociationParallelSQLTests.swift in Sources */, 56419D8F24A54062004967E1 /* RecordUniqueIndexTests.swift in Sources */, 56419E4F24A54062004967E1 /* DatabaseValueConvertibleEscapingTests.swift in Sources */, + 567B5C412AD32A2D00629622 /* SharedValueObservationTests.swift in Sources */, 56419E9F24A54063004967E1 /* FetchableRecordTests.swift in Sources */, 56419E3324A54062004967E1 /* MutablePersistableRecordChangesTests.swift in Sources */, 56419DFF24A54062004967E1 /* DatabaseCollationTests.swift in Sources */, @@ -1269,12 +1379,14 @@ 56419E9324A54063004967E1 /* DatabaseValueConvertibleDecodableTests.swift in Sources */, 5641A1B424A540D6004967E1 /* Recording.swift in Sources */, 56419E8524A54063004967E1 /* FTS3PatternTests.swift in Sources */, + 567B5C312AD32A2D00629622 /* TableTests.swift in Sources */, 56419D8324A54062004967E1 /* FetchableRecordDecodableTests.swift in Sources */, 56419D8924A54062004967E1 /* AssociationPrefetchingSQLTests.swift in Sources */, 56419E5F24A54062004967E1 /* DatabaseUUIDEncodingStrategyTests.swift in Sources */, 56419E6F24A54062004967E1 /* AssociationPrefetchingObservationTests.swift in Sources */, 56419E2D24A54062004967E1 /* AssociationHasManyThroughRowScopeTests.swift in Sources */, 56419ECD24A54063004967E1 /* OrderedDictionaryTests.swift in Sources */, + 567B5C2B2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift in Sources */, 56419D6924A54062004967E1 /* ResultCodeTests.swift in Sources */, 56419DB724A54062004967E1 /* AssociationPrefetchingFetchableRecordTests.swift in Sources */, 56419D9124A54062004967E1 /* DatabaseFunctionTests.swift in Sources */, @@ -1293,6 +1405,7 @@ 56419DCB24A54062004967E1 /* DatabaseDateDecodingStrategyTests.swift in Sources */, 56419E3924A54062004967E1 /* ValueObservationTests.swift in Sources */, 56419DCF24A54062004967E1 /* GRDBTestCase.swift in Sources */, + 567B5C372AD32A2D00629622 /* SQLExpressionIsConstantTests.swift in Sources */, 56419EE524A54063004967E1 /* FilterCursorTests.swift in Sources */, 56419E4524A54062004967E1 /* SQLRequestTests.swift in Sources */, 56419ECF24A54063004967E1 /* AssociationParallelRowScopesTests.swift in Sources */, @@ -1301,6 +1414,7 @@ 56419E6124A54062004967E1 /* CompilationProtocolTests.swift in Sources */, 56419DE724A54062004967E1 /* MapCursorTests.swift in Sources */, 56419E2924A54062004967E1 /* NumericOverflowTests.swift in Sources */, + 567B5C2F2AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 5641A1AC24A540D6004967E1 /* RecordingError.swift in Sources */, 56419D7D24A54062004967E1 /* ValueObservationMapTests.swift in Sources */, 56419EAF24A54063004967E1 /* RowFromDictionaryTests.swift in Sources */, @@ -1310,6 +1424,7 @@ 5641A1A824A540D6004967E1 /* ValueObservationPublisherTests.swift in Sources */, 56419E2524A54062004967E1 /* 
QueryInterfaceExpressionsTests.swift in Sources */, 56419D8B24A54062004967E1 /* RecordInitializersTests.swift in Sources */, + 567B5C292AD32A2D00629622 /* CommonTableExpressionTests.swift in Sources */, 56419E8924A54063004967E1 /* AssociationParallelDecodableRecordTests.swift in Sources */, 56419E6D24A54062004967E1 /* DatabasePoolReadOnlyTests.swift in Sources */, 56419D9524A54062004967E1 /* DatabaseSavepointTests.swift in Sources */, @@ -1320,10 +1435,13 @@ 56419E0924A54062004967E1 /* DatabaseValueConversionTests.swift in Sources */, 56419DB124A54062004967E1 /* AssociationBelongsToDecodableRecordTests.swift in Sources */, 56419D9324A54062004967E1 /* PrimaryKeyInfoTests.swift in Sources */, + 567B5C392AD32A2D00629622 /* FoundationDecimalTests.swift in Sources */, 5641A1BA24A540D6004967E1 /* Inverted.swift in Sources */, 5641A1B624A540D6004967E1 /* Prefix.swift in Sources */, 56419E4124A54062004967E1 /* TransactionObserverTests.swift in Sources */, 56419DDB24A54062004967E1 /* DatabaseSuspensionTests.swift in Sources */, + 567B5C3B2AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift in Sources */, + 567B5C4D2AD32F7000629622 /* DatabaseReaderDumpTests.swift in Sources */, 56419DFB24A54062004967E1 /* DatabaseSnapshotTests.swift in Sources */, 56419DDD24A54062004967E1 /* FTS5PatternTests.swift in Sources */, 56419EBB24A54063004967E1 /* AssociationHasManyThroughSQLTests.swift in Sources */, @@ -1345,6 +1463,7 @@ 56419E0324A54062004967E1 /* SchedulingWatchdogTests.swift in Sources */, 56419E7D24A54063004967E1 /* DropWhileCursorTests.swift in Sources */, 56419EBD24A54063004967E1 /* AssociationHasOneThroughSQLDerivationTests.swift in Sources */, + 56071DE52BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift in Sources */, 56419E8F24A54063004967E1 /* AssociationHasManySQLTests.swift in Sources */, 5641A1A424A540D6004967E1 /* Support.swift in Sources */, 5641A1BC24A540D6004967E1 /* PublisherExpectation.swift in Sources */, @@ -1368,10 +1487,12 @@ 56419DF524A54062004967E1 /* DatabaseTimestampTests.swift in Sources */, 56419ED724A54063004967E1 /* ColumnInfoTests.swift in Sources */, 56419D9F24A54062004967E1 /* CompilationSubClassTests.swift in Sources */, + 567B5C3F2AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift in Sources */, 56419DBB24A54062004967E1 /* ValueObservationRegionRecordingTests.swift in Sources */, 56419EA924A54063004967E1 /* DatabasePoolReleaseMemoryTests.swift in Sources */, 56419E7F24A54063004967E1 /* DatabaseQueueConcurrencyTests.swift in Sources */, 56419EF124A54063004967E1 /* StatementColumnConvertibleFetchTests.swift in Sources */, + 561F38FB2AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, 56419EA324A54063004967E1 /* TableRecordDeleteTests.swift in Sources */, 56419EFB24A54063004967E1 /* FoundationDateComponentsTests.swift in Sources */, 56419E3D24A54062004967E1 /* FTS3TableBuilderTests.swift in Sources */, @@ -1417,8 +1538,10 @@ 56419E0824A54062004967E1 /* DatabasePoolBackupTests.swift in Sources */, 56419DB024A54062004967E1 /* DatabaseMigratorTests.swift in Sources */, 56419E7A24A54062004967E1 /* RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift in Sources */, + 5623B61E2AED39F700436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 56419EA224A54063004967E1 /* QueryInterfaceExtensibilityTests.swift in Sources */, 56419D8824A54062004967E1 /* RecordPrimaryKeyHiddenRowIDTests.swift in Sources */, + 567B5C362AD32A2D00629622 /* DatabaseSnapshotPoolTests.swift in Sources */, 56419DCA24A54062004967E1 /* 
AssociationChainSQLTests.swift in Sources */, 56419D8E24A54062004967E1 /* DatabaseErrorTests.swift in Sources */, 56419DEC24A54062004967E1 /* IndexInfoTests.swift in Sources */, @@ -1427,9 +1550,13 @@ 56419EA624A54063004967E1 /* MutablePersistableRecordTests.swift in Sources */, 56419DC224A54062004967E1 /* FoundationNSDateTests.swift in Sources */, 56419EC624A54063004967E1 /* FlattenCursorTests.swift in Sources */, + 567B5C3E2AD32A2D00629622 /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */, 56419E9E24A54063004967E1 /* StatementArgumentsTests.swift in Sources */, + 568C3F802A5AB36900A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 5641A1AB24A540D6004967E1 /* Recorder.swift in Sources */, 56419D7C24A54062004967E1 /* FetchableRecord+QueryInterfaceRequestTests.swift in Sources */, + 567B5C4C2AD32F7000629622 /* DatabaseDumpTests.swift in Sources */, + 567B5C2E2AD32A2D00629622 /* SingletonRecordTest.swift in Sources */, 56419D6C24A54062004967E1 /* DatabaseQueueTests.swift in Sources */, 56419E2424A54062004967E1 /* SelectStatementTests.swift in Sources */, 56419E4C24A54062004967E1 /* RowTestCase.swift in Sources */, @@ -1440,6 +1567,7 @@ 56419DF424A54062004967E1 /* AssociationBelongsToSQLDerivationTests.swift in Sources */, 56419E6824A54062004967E1 /* AssociationHasOneThroughDecodableRecordTests.swift in Sources */, 56419E4E24A54062004967E1 /* FoundationNSDecimalNumberTests.swift in Sources */, + 561F38FA2AC9CE6D0051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */, 56419E6C24A54062004967E1 /* ValueObservationFetchTests.swift in Sources */, 56419D8624A54062004967E1 /* FTS4RecordTests.swift in Sources */, 5641A1B324A540D6004967E1 /* Next.swift in Sources */, @@ -1471,21 +1599,26 @@ 56419EDA24A54063004967E1 /* AssociationHasOneSQLTests.swift in Sources */, 56419EAC24A54063004967E1 /* DatabaseValueTests.swift in Sources */, 56419D7824A54062004967E1 /* DatabaseCursorTests.swift in Sources */, + 5603CED02AC8636E00CF097D /* JSONExpressionsTests.swift in Sources */, 56419E1624A54062004967E1 /* AssociationChainRowScopesTests.swift in Sources */, + 567B5C342AD32A2D00629622 /* TransactionDateTests.swift in Sources */, 56F61DF1283D484700AF9884 /* getThreadsCount.c in Sources */, 56419EC824A54063004967E1 /* RecordPrimaryKeyMultipleTests.swift in Sources */, 56419DF824A54062004967E1 /* DatabaseDateEncodingStrategyTests.swift in Sources */, + 5623B6202AED39F700436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */, 56419EB824A54063004967E1 /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */, 56419E7624A54062004967E1 /* AssociationAggregateTests.swift in Sources */, 56419DDA24A54062004967E1 /* AssociationBelongsToSQLTests.swift in Sources */, 56419DA624A54062004967E1 /* QueryInterfacePromiseTests.swift in Sources */, 56419EE424A54063004967E1 /* Row+FoundationTests.swift in Sources */, 56419E7224A54062004967E1 /* DatabaseTests.swift in Sources */, + 561F38DE2AC891710051EEE9 /* JSONColumnTests.swift in Sources */, 56419E9624A54063004967E1 /* PrefixWhileCursorTests.swift in Sources */, 56419E3024A54062004967E1 /* JoinSupportTests.swift in Sources */, 56419E0624A54062004967E1 /* AssociationParallelSQLTests.swift in Sources */, 56419D9024A54062004967E1 /* RecordUniqueIndexTests.swift in Sources */, 56419E5024A54062004967E1 /* DatabaseValueConvertibleEscapingTests.swift in Sources */, + 567B5C422AD32A2D00629622 /* SharedValueObservationTests.swift in Sources */, 56419EA024A54063004967E1 /* FetchableRecordTests.swift in Sources */, 
56419E3424A54062004967E1 /* MutablePersistableRecordChangesTests.swift in Sources */, 56419E0024A54062004967E1 /* DatabaseCollationTests.swift in Sources */, @@ -1494,12 +1627,14 @@ 56419E9424A54063004967E1 /* DatabaseValueConvertibleDecodableTests.swift in Sources */, 5641A1B524A540D6004967E1 /* Recording.swift in Sources */, 56419E8624A54063004967E1 /* FTS3PatternTests.swift in Sources */, + 567B5C322AD32A2D00629622 /* TableTests.swift in Sources */, 56419D8424A54062004967E1 /* FetchableRecordDecodableTests.swift in Sources */, 56419D8A24A54062004967E1 /* AssociationPrefetchingSQLTests.swift in Sources */, 56419E6024A54062004967E1 /* DatabaseUUIDEncodingStrategyTests.swift in Sources */, 56419E7024A54062004967E1 /* AssociationPrefetchingObservationTests.swift in Sources */, 56419E2E24A54062004967E1 /* AssociationHasManyThroughRowScopeTests.swift in Sources */, 56419ECE24A54063004967E1 /* OrderedDictionaryTests.swift in Sources */, + 567B5C2C2AD32A2D00629622 /* SQLIdentifyingColumnsTests.swift in Sources */, 56419D6A24A54062004967E1 /* ResultCodeTests.swift in Sources */, 56419DB824A54062004967E1 /* AssociationPrefetchingFetchableRecordTests.swift in Sources */, 56419D9224A54062004967E1 /* DatabaseFunctionTests.swift in Sources */, @@ -1518,6 +1653,7 @@ 56419DCC24A54062004967E1 /* DatabaseDateDecodingStrategyTests.swift in Sources */, 56419E3A24A54062004967E1 /* ValueObservationTests.swift in Sources */, 56419DD024A54062004967E1 /* GRDBTestCase.swift in Sources */, + 567B5C382AD32A2D00629622 /* SQLExpressionIsConstantTests.swift in Sources */, 56419EE624A54063004967E1 /* FilterCursorTests.swift in Sources */, 56419E4624A54062004967E1 /* SQLRequestTests.swift in Sources */, 56419ED024A54063004967E1 /* AssociationParallelRowScopesTests.swift in Sources */, @@ -1526,6 +1662,7 @@ 56419E6224A54062004967E1 /* CompilationProtocolTests.swift in Sources */, 56419DE824A54062004967E1 /* MapCursorTests.swift in Sources */, 56419E2A24A54062004967E1 /* NumericOverflowTests.swift in Sources */, + 567B5C302AD32A2D00629622 /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 5641A1AD24A540D6004967E1 /* RecordingError.swift in Sources */, 56419D7E24A54062004967E1 /* ValueObservationMapTests.swift in Sources */, 56419EB024A54063004967E1 /* RowFromDictionaryTests.swift in Sources */, @@ -1535,6 +1672,7 @@ 5641A1A924A540D6004967E1 /* ValueObservationPublisherTests.swift in Sources */, 56419E2624A54062004967E1 /* QueryInterfaceExpressionsTests.swift in Sources */, 56419D8C24A54062004967E1 /* RecordInitializersTests.swift in Sources */, + 567B5C2A2AD32A2D00629622 /* CommonTableExpressionTests.swift in Sources */, 56419E8A24A54063004967E1 /* AssociationParallelDecodableRecordTests.swift in Sources */, 56419E6E24A54062004967E1 /* DatabasePoolReadOnlyTests.swift in Sources */, 56419D9624A54062004967E1 /* DatabaseSavepointTests.swift in Sources */, @@ -1545,10 +1683,13 @@ 56419E0A24A54062004967E1 /* DatabaseValueConversionTests.swift in Sources */, 56419DB224A54062004967E1 /* AssociationBelongsToDecodableRecordTests.swift in Sources */, 56419D9424A54062004967E1 /* PrimaryKeyInfoTests.swift in Sources */, + 567B5C3A2AD32A2D00629622 /* FoundationDecimalTests.swift in Sources */, 5641A1BB24A540D6004967E1 /* Inverted.swift in Sources */, 5641A1B724A540D6004967E1 /* Prefix.swift in Sources */, 56419E4224A54062004967E1 /* TransactionObserverTests.swift in Sources */, 56419DDC24A54062004967E1 /* DatabaseSuspensionTests.swift in Sources */, + 567B5C3C2AD32A2D00629622 /* AssociationPrefetchingRelationTests.swift in 
Sources */, + 567B5C4E2AD32F7000629622 /* DatabaseReaderDumpTests.swift in Sources */, 56419DFC24A54062004967E1 /* DatabaseSnapshotTests.swift in Sources */, 56419DDE24A54062004967E1 /* FTS5PatternTests.swift in Sources */, 56419EBC24A54063004967E1 /* AssociationHasManyThroughSQLTests.swift in Sources */, @@ -1570,6 +1711,7 @@ 56419E0424A54062004967E1 /* SchedulingWatchdogTests.swift in Sources */, 56419E7E24A54063004967E1 /* DropWhileCursorTests.swift in Sources */, 56419EBE24A54063004967E1 /* AssociationHasOneThroughSQLDerivationTests.swift in Sources */, + 56071DE62BD3DDB5000802B6 /* SingletonUserDefaultsTest.swift in Sources */, 56419E9024A54063004967E1 /* AssociationHasManySQLTests.swift in Sources */, 5641A1A524A540D6004967E1 /* Support.swift in Sources */, 5641A1BD24A540D6004967E1 /* PublisherExpectation.swift in Sources */, @@ -1593,10 +1735,12 @@ 56419DF624A54062004967E1 /* DatabaseTimestampTests.swift in Sources */, 56419ED824A54063004967E1 /* ColumnInfoTests.swift in Sources */, 56419DA024A54062004967E1 /* CompilationSubClassTests.swift in Sources */, + 567B5C402AD32A2D00629622 /* CaseInsensitiveIdentifierTests.swift in Sources */, 56419DBC24A54062004967E1 /* ValueObservationRegionRecordingTests.swift in Sources */, 56419EAA24A54063004967E1 /* DatabasePoolReleaseMemoryTests.swift in Sources */, 56419E8024A54063004967E1 /* DatabaseQueueConcurrencyTests.swift in Sources */, 56419EF224A54063004967E1 /* StatementColumnConvertibleFetchTests.swift in Sources */, + 561F38FC2AC9CE6D0051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, 56419EA424A54063004967E1 /* TableRecordDeleteTests.swift in Sources */, 56419EFC24A54063004967E1 /* FoundationDateComponentsTests.swift in Sources */, 56419E3E24A54062004967E1 /* FTS3TableBuilderTests.swift in Sources */, diff --git a/Tests/CocoaPods/SQLCipher3/Podfile b/Tests/CocoaPods/SQLCipher3/Podfile index c8066b3ae0..fbe38136b0 100644 --- a/Tests/CocoaPods/SQLCipher3/Podfile +++ b/Tests/CocoaPods/SQLCipher3/Podfile @@ -17,7 +17,27 @@ end post_install do |installer| installer.pods_project.targets.each do |target| target.build_configurations.each do |config| + # Workaround for Xcode 14.3+ + # https://github.com/CocoaPods/CocoaPods/issues/11839 + config.build_settings['MACOSX_DEPLOYMENT_TARGET'] = '10.13' config.build_settings['GCC_OPTIMIZATION_LEVEL'] = '3' end end + + # TODO: remove when https://github.com/CocoaPods/CocoaPods/pull/12009 is merged. + # https://github.com/CocoaPods/CocoaPods/issues/12012#issuecomment-1655191516 + installer.aggregate_targets.each do |target| + target.xcconfigs.each do |variant, xcconfig| + xcconfig_path = target.client_root + target.xcconfig_relative_path(variant) + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + installer.pods_project.targets.each do |target| + target.build_configurations.each do |config| + if config.base_configuration_reference.is_a? 
Xcodeproj::Project::Object::PBXFileReference + xcconfig_path = config.base_configuration_reference.real_path + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + end end diff --git a/Tests/CocoaPods/SQLCipher4/GRDBTests.xcodeproj/project.pbxproj b/Tests/CocoaPods/SQLCipher4/GRDBTests.xcodeproj/project.pbxproj index a92a670974..f8c3e463fe 100644 --- a/Tests/CocoaPods/SQLCipher4/GRDBTests.xcodeproj/project.pbxproj +++ b/Tests/CocoaPods/SQLCipher4/GRDBTests.xcodeproj/project.pbxproj @@ -3,10 +3,24 @@ archiveVersion = 1; classes = { }; - objectVersion = 51; + objectVersion = 54; objects = { /* Begin PBXBuildFile section */ + 56071DE22BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56071DE12BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift */; }; + 56071DE32BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56071DE12BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift */; }; + 561F38E12AC891890051EEE9 /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38DF2AC891890051EEE9 /* JSONExpressionsTests.swift */; }; + 561F38E22AC891890051EEE9 /* JSONExpressionsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38DF2AC891890051EEE9 /* JSONExpressionsTests.swift */; }; + 561F38E32AC891890051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38E02AC891890051EEE9 /* JSONColumnTests.swift */; }; + 561F38E42AC891890051EEE9 /* JSONColumnTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38E02AC891890051EEE9 /* JSONColumnTests.swift */; }; + 561F38FF2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38FD2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 561F39002AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38FD2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift */; }; + 561F39012AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38FE2AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; + 561F39022AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 561F38FE2AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift */; }; + 5623B6232AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6212AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; + 5623B6242AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6212AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift */; }; + 5623B6252AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6222AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; + 5623B6262AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5623B6222AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift */; }; 56419FC824A540A1004967E1 /* FetchRequestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419EFD24A54093004967E1 /* FetchRequestTests.swift */; }; 56419FC924A540A1004967E1 /* FetchRequestTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419EFD24A54093004967E1 /* FetchRequestTests.swift */; }; 56419FCA24A540A1004967E1 /* 
DatabasePoolBackupTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56419EFE24A54093004967E1 /* DatabasePoolBackupTests.swift */; }; @@ -445,6 +459,40 @@ 564A215A226C8F25001F64F1 /* db.SQLCipher3 in Resources */ = {isa = PBXBuildFile; fileRef = 564A2158226C8F24001F64F1 /* db.SQLCipher3 */; }; 565A27C927871FE500659A62 /* BackupTestCase.swift in Sources */ = {isa = PBXBuildFile; fileRef = 565A27C827871FE500659A62 /* BackupTestCase.swift */; }; 565A27CA27871FE500659A62 /* BackupTestCase.swift in Sources */ = {isa = PBXBuildFile; fileRef = 565A27C827871FE500659A62 /* BackupTestCase.swift */; }; + 567B231A2A29BFE100C61174 /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 567B23192A29BFE100C61174 /* Issue1383.sqlite */; }; + 567B231B2A29BFE100C61174 /* Issue1383.sqlite in Resources */ = {isa = PBXBuildFile; fileRef = 567B23192A29BFE100C61174 /* Issue1383.sqlite */; }; + 567B5C102AD32A0000629622 /* SingletonRecordTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0A2AD32A0000629622 /* SingletonRecordTest.swift */; }; + 567B5C112AD32A0000629622 /* SingletonRecordTest.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0A2AD32A0000629622 /* SingletonRecordTest.swift */; }; + 567B5C122AD32A0000629622 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0B2AD32A0000629622 /* SharedValueObservationTests.swift */; }; + 567B5C132AD32A0000629622 /* SharedValueObservationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0B2AD32A0000629622 /* SharedValueObservationTests.swift */; }; + 567B5C142AD32A0000629622 /* SQLExpressionIsConstantTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0C2AD32A0000629622 /* SQLExpressionIsConstantTests.swift */; }; + 567B5C152AD32A0000629622 /* SQLExpressionIsConstantTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0C2AD32A0000629622 /* SQLExpressionIsConstantTests.swift */; }; + 567B5C162AD32A0000629622 /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0D2AD32A0000629622 /* SQLIdentifyingColumnsTests.swift */; }; + 567B5C172AD32A0000629622 /* SQLIdentifyingColumnsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0D2AD32A0000629622 /* SQLIdentifyingColumnsTests.swift */; }; + 567B5C182AD32A0000629622 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0E2AD32A0000629622 /* TransactionDateTests.swift */; }; + 567B5C192AD32A0000629622 /* TransactionDateTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0E2AD32A0000629622 /* TransactionDateTests.swift */; }; + 567B5C1A2AD32A0000629622 /* TableTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0F2AD32A0000629622 /* TableTests.swift */; }; + 567B5C1B2AD32A0000629622 /* TableTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C0F2AD32A0000629622 /* TableTests.swift */; }; + 567B5C452AD32F5900629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C432AD32F5900629622 /* DatabaseDumpTests.swift */; }; + 567B5C462AD32F5900629622 /* DatabaseDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C432AD32F5900629622 /* DatabaseDumpTests.swift */; }; + 567B5C472AD32F5900629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 567B5C442AD32F5900629622 /* DatabaseReaderDumpTests.swift */; }; + 567B5C482AD32F5900629622 /* DatabaseReaderDumpTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 
567B5C442AD32F5900629622 /* DatabaseReaderDumpTests.swift */; }; + 568C3F822A5AB38300A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F812A5AB38300A2309D /* ForeignKeyDefinitionTests.swift */; }; + 568C3F832A5AB38300A2309D /* ForeignKeyDefinitionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F812A5AB38300A2309D /* ForeignKeyDefinitionTests.swift */; }; + 568C3F8B2A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F842A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift */; }; + 568C3F8C2A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F842A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift */; }; + 568C3F8D2A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F852A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift */; }; + 568C3F8E2A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F852A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift */; }; + 568C3F8F2A5AB3A800A2309D /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F862A5AB3A800A2309D /* FoundationDecimalTests.swift */; }; + 568C3F902A5AB3A800A2309D /* FoundationDecimalTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F862A5AB3A800A2309D /* FoundationDecimalTests.swift */; }; + 568C3F912A5AB3A800A2309D /* CommonTableExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F872A5AB3A800A2309D /* CommonTableExpressionTests.swift */; }; + 568C3F922A5AB3A800A2309D /* CommonTableExpressionTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F872A5AB3A800A2309D /* CommonTableExpressionTests.swift */; }; + 568C3F932A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F882A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift */; }; + 568C3F942A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F882A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift */; }; + 568C3F952A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F892A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift */; }; + 568C3F962A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F892A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift */; }; + 568C3F972A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F8A2A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */; }; + 568C3F982A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 568C3F8A2A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */; }; 5691D97227257C930021D540 /* AvailableElements.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5691D97127257C930021D540 /* AvailableElements.swift */; }; 5691D97327257C930021D540 /* AvailableElements.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5691D97127257C930021D540 /* AvailableElements.swift */; }; 56F61DF6283D4AB100AF9884 /* getThreadsCount.c in Sources */ = {isa = PBXBuildFile; fileRef = 56F61DF4283D4AB100AF9884 /* getThreadsCount.c */; }; @@ -457,6 +505,13 @@ /* Begin 
PBXFileReference section */ 04298D834C818285823558AB /* Pods-GRDBTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GRDBTests.release.xcconfig"; path = "Target Support Files/Pods-GRDBTests/Pods-GRDBTests.release.xcconfig"; sourceTree = "<group>"; }; 47C5D1B9AFFE795AA1D6EA5D /* Pods-GRDBTestsEncrypted.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-GRDBTestsEncrypted.release.xcconfig"; path = "Target Support Files/Pods-GRDBTestsEncrypted/Pods-GRDBTestsEncrypted.release.xcconfig"; sourceTree = "<group>"; }; + 56071DE12BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SingletonUserDefaultsTest.swift; sourceTree = "<group>"; }; + 561F38DF2AC891890051EEE9 /* JSONExpressionsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONExpressionsTests.swift; sourceTree = "<group>"; }; + 561F38E02AC891890051EEE9 /* JSONColumnTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = JSONColumnTests.swift; sourceTree = "<group>"; }; + 561F38FD2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataEncodingStrategyTests.swift; sourceTree = "<group>"; }; + 561F38FE2AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDataDecodingStrategyTests.swift; sourceTree = "<group>"; }; + 5623B6212AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueInMemoryCopyTests.swift; sourceTree = "<group>"; }; + 5623B6222AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseQueueTemporaryCopyTests.swift; sourceTree = "<group>"; }; 56419EFD24A54093004967E1 /* FetchRequestTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FetchRequestTests.swift; sourceTree = "<group>"; }; 56419EFE24A54093004967E1 /* DatabasePoolBackupTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabasePoolBackupTests.swift; sourceTree = "<group>"; }; 56419EFF24A54093004967E1 /* TableRecordDeleteTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableRecordDeleteTests.swift; sourceTree = "<group>"; }; @@ -679,6 +734,23 @@ 564A2156226B8E18001F64F1 /* GRDBTestsEncrypted.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = GRDBTestsEncrypted.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; 564A2158226C8F24001F64F1 /* db.SQLCipher3 */ = {isa = PBXFileReference; lastKnownFileType = file; path = db.SQLCipher3; sourceTree = SOURCE_ROOT; }; 565A27C827871FE500659A62 /* BackupTestCase.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = BackupTestCase.swift; sourceTree = "<group>"; }; + 567B23192A29BFE100C61174 /* Issue1383.sqlite */ = {isa = PBXFileReference; lastKnownFileType = file; path = Issue1383.sqlite; sourceTree = "<group>"; }; + 567B5C0A2AD32A0000629622 /* SingletonRecordTest.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType =
sourcecode.swift; path = SingletonRecordTest.swift; sourceTree = "<group>"; }; + 567B5C0B2AD32A0000629622 /* SharedValueObservationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SharedValueObservationTests.swift; sourceTree = "<group>"; }; + 567B5C0C2AD32A0000629622 /* SQLExpressionIsConstantTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLExpressionIsConstantTests.swift; sourceTree = "<group>"; }; + 567B5C0D2AD32A0000629622 /* SQLIdentifyingColumnsTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SQLIdentifyingColumnsTests.swift; sourceTree = "<group>"; }; + 567B5C0E2AD32A0000629622 /* TransactionDateTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TransactionDateTests.swift; sourceTree = "<group>"; }; + 567B5C0F2AD32A0000629622 /* TableTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TableTests.swift; sourceTree = "<group>"; }; + 567B5C432AD32F5900629622 /* DatabaseDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseDumpTests.swift; sourceTree = "<group>"; }; + 567B5C442AD32F5900629622 /* DatabaseReaderDumpTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseReaderDumpTests.swift; sourceTree = "<group>"; }; + 568C3F812A5AB38300A2309D /* ForeignKeyDefinitionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ForeignKeyDefinitionTests.swift; sourceTree = "<group>"; }; + 568C3F842A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AssociationPrefetchingRelationTests.swift; sourceTree = "<group>"; }; + 568C3F852A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseColumnEncodingStrategyTests.swift; sourceTree = "<group>"; }; + 568C3F862A5AB3A800A2309D /* FoundationDecimalTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FoundationDecimalTests.swift; sourceTree = "<group>"; }; + 568C3F872A5AB3A800A2309D /* CommonTableExpressionTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CommonTableExpressionTests.swift; sourceTree = "<group>"; }; + 568C3F882A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CaseInsensitiveIdentifierTests.swift; sourceTree = "<group>"; }; + 568C3F892A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = DatabaseSnapshotPoolTests.swift; sourceTree = "<group>"; }; + 568C3F8A2A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RecordMinimalNonOptionalPrimaryKeySingleTests.swift; sourceTree = "<group>"; }; 5691D97127257C930021D540 /* AvailableElements.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AvailableElements.swift; sourceTree = "<group>"; }; 56F61DF2283D4AB100AF9884 /* GRDBTests-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path =
"GRDBTests-Bridging-Header.h"; sourceTree = ""; }; 56F61DF4283D4AB100AF9884 /* getThreadsCount.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = getThreadsCount.c; sourceTree = ""; }; @@ -779,6 +851,7 @@ 564A1F6F226B89D6001F64F1 /* Betty.jpeg */, 56419F4E24A54098004967E1 /* InflectionsTests.json */, 564A2158226C8F24001F64F1 /* db.SQLCipher3 */, + 567B23192A29BFE100C61174 /* Issue1383.sqlite */, 56419FA524A5409E004967E1 /* AnyCursorTests.swift */, 56419F5A24A54098004967E1 /* AssociationAggregateTests.swift */, 56419FBD24A540A0004967E1 /* AssociationBelongsToDecodableRecordTests.swift */, @@ -807,14 +880,17 @@ 56419F9A24A5409D004967E1 /* AssociationPrefetchingCodableRecordTests.swift */, 56419F2224A54095004967E1 /* AssociationPrefetchingFetchableRecordTests.swift */, 56419F8F24A5409C004967E1 /* AssociationPrefetchingObservationTests.swift */, + 568C3F842A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift */, 56419F1E24A54094004967E1 /* AssociationPrefetchingRowTests.swift */, 56419F4424A54097004967E1 /* AssociationPrefetchingSQLTests.swift */, 56419F5024A54098004967E1 /* AssociationRowScopeSearchTests.swift */, 56419F9E24A5409D004967E1 /* AssociationTableAliasTestsSQLTests.swift */, 565A27C827871FE500659A62 /* BackupTestCase.swift */, + 568C3F882A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift */, 56419F6B24A5409A004967E1 /* CGFloatTests.swift */, 56419F6524A54099004967E1 /* ColumnExpressionTests.swift */, 56419F9324A5409D004967E1 /* ColumnInfoTests.swift */, + 568C3F872A5AB3A800A2309D /* CommonTableExpressionTests.swift */, 56419F5424A54098004967E1 /* CompilationProtocolTests.swift */, 56419F8024A5409B004967E1 /* CompilationSubClassTests.swift */, 56419FBF24A540A0004967E1 /* CursorTests.swift */, @@ -822,10 +898,14 @@ 56419F9D24A5409D004967E1 /* DatabaseAfterNextTransactionCommitTests.swift */, 56419F8A24A5409C004967E1 /* DatabaseAggregateTests.swift */, 56419FBB24A540A0004967E1 /* DatabaseCollationTests.swift */, + 568C3F852A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift */, 56419F7024A5409A004967E1 /* DatabaseConfigurationTests.swift */, 56419F2324A54095004967E1 /* DatabaseCursorTests.swift */, + 561F38FE2AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift */, + 561F38FD2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift */, 56419FBE24A540A0004967E1 /* DatabaseDateDecodingStrategyTests.swift */, 56419F2824A54095004967E1 /* DatabaseDateEncodingStrategyTests.swift */, + 567B5C432AD32F5900629622 /* DatabaseDumpTests.swift */, 56419F5824A54098004967E1 /* DatabaseErrorTests.swift */, 56419FC024A540A0004967E1 /* DatabaseFunctionTests.swift */, 56419F6024A54099004967E1 /* DatabaseLogErrorTests.swift */, @@ -840,15 +920,19 @@ 56419F7424A5409A004967E1 /* DatabasePoolTests.swift */, 56419F1324A54093004967E1 /* DatabaseQueueBackupTests.swift */, 56419F7F24A5409B004967E1 /* DatabaseQueueConcurrencyTests.swift */, + 5623B6212AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift */, 56419FC424A540A0004967E1 /* DatabaseQueueInMemoryTests.swift */, 56419F8424A5409B004967E1 /* DatabaseQueueReadOnlyTests.swift */, 56419F3424A54096004967E1 /* DatabaseQueueReleaseMemoryTests.swift */, 56419F3224A54096004967E1 /* DatabaseQueueSchemaCacheTests.swift */, + 5623B6222AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift */, 56419F9924A5409D004967E1 /* DatabaseQueueTests.swift */, + 567B5C442AD32F5900629622 /* DatabaseReaderDumpTests.swift */, 56419F5C24A54099004967E1 /* DatabaseReaderTests.swift */, 
56419F0524A54093004967E1 /* DatabaseRegionObservationTests.swift */, 56419F0424A54093004967E1 /* DatabaseRegionTests.swift */, 56419F0D24A54093004967E1 /* DatabaseSavepointTests.swift */, + 568C3F892A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift */, 56419FB424A5409F004967E1 /* DatabaseSnapshotTests.swift */, 56419F1D24A54094004967E1 /* DatabaseSuspensionTests.swift */, 56419F7324A5409A004967E1 /* DatabaseTests.swift */, @@ -877,10 +961,12 @@ 56419EFD24A54093004967E1 /* FetchRequestTests.swift */, 56419FAD24A5409F004967E1 /* FilterCursorTests.swift */, 56419FAE24A5409F004967E1 /* FlattenCursorTests.swift */, + 568C3F812A5AB38300A2309D /* ForeignKeyDefinitionTests.swift */, 56419F7224A5409A004967E1 /* ForeignKeyInfoTests.swift */, 56419FC524A540A1004967E1 /* FoundationDataTests.swift */, 56419F2524A54095004967E1 /* FoundationDateComponentsTests.swift */, 56419F1124A54093004967E1 /* FoundationDateTests.swift */, + 568C3F862A5AB3A800A2309D /* FoundationDecimalTests.swift */, 56419F9F24A5409E004967E1 /* FoundationNSDataTests.swift */, 56419FB224A5409F004967E1 /* FoundationNSDateTests.swift */, 56419F8124A5409B004967E1 /* FoundationNSDecimalNumberTests.swift */, @@ -907,6 +993,8 @@ 56419FA824A5409E004967E1 /* IndexInfoTests.swift */, 56419F3824A54096004967E1 /* InflectionsTests.swift */, 56419F6D24A5409A004967E1 /* JoinSupportTests.swift */, + 561F38E02AC891890051EEE9 /* JSONColumnTests.swift */, + 561F38DF2AC891890051EEE9 /* JSONExpressionsTests.swift */, 56419F7B24A5409B004967E1 /* MapCursorTests.swift */, 56419F4524A54097004967E1 /* MutablePersistableRecordChangesTests.swift */, 56419F0224A54093004967E1 /* MutablePersistableRecordEncodableTests.swift */, @@ -927,6 +1015,7 @@ 56419F0724A54093004967E1 /* Record+QueryInterfaceRequestTests.swift */, 56419F2124A54094004967E1 /* RecordEditedTests.swift */, 56419F4824A54097004967E1 /* RecordInitializersTests.swift */, + 568C3F8A2A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift */, 56419F0C24A54093004967E1 /* RecordMinimalPrimaryKeyRowIDTests.swift */, 56419F6124A54099004967E1 /* RecordMinimalPrimaryKeySingleTests.swift */, 56419F5124A54098004967E1 /* RecordPersistenceConflictPolicy.swift */, @@ -950,7 +1039,12 @@ 56419F6E24A5409A004967E1 /* RowTestCase.swift */, 56419F8524A5409B004967E1 /* SchedulingWatchdogTests.swift */, 56419F5B24A54098004967E1 /* SelectStatementTests.swift */, + 567B5C0B2AD32A0000629622 /* SharedValueObservationTests.swift */, + 567B5C0A2AD32A0000629622 /* SingletonRecordTest.swift */, + 56071DE12BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift */, + 567B5C0C2AD32A0000629622 /* SQLExpressionIsConstantTests.swift */, 56419F1C24A54094004967E1 /* SQLExpressionLiteralTests.swift */, + 567B5C0D2AD32A0000629622 /* SQLIdentifyingColumnsTests.swift */, 56419F3624A54096004967E1 /* SQLLiteralTests.swift */, 56419F6924A54099004967E1 /* SQLRequestTests.swift */, 56419F1924A54094004967E1 /* StatementArguments+FoundationTests.swift */, @@ -961,6 +1055,8 @@ 56419EFF24A54093004967E1 /* TableRecordDeleteTests.swift */, 56419F9024A5409C004967E1 /* TableRecordTests.swift */, 56419F0624A54093004967E1 /* TableRecordUpdateTests.swift */, + 567B5C0F2AD32A0000629622 /* TableTests.swift */, + 567B5C0E2AD32A0000629622 /* TransactionDateTests.swift */, 56419F8B24A5409C004967E1 /* TransactionObserverSavepointsTests.swift */, 56419F7A24A5409B004967E1 /* TransactionObserverTests.swift */, 56419FB624A5409F004967E1 /* TruncateOptimizationTests.swift */, @@ -1090,6 +1186,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 
2147483647; files = ( + 567B231A2A29BFE100C61174 /* Issue1383.sqlite in Resources */, 5641A06A24A540A1004967E1 /* InflectionsTests.json in Resources */, 564A2026226B89E1001F64F1 /* Betty.jpeg in Resources */, 564A2159226C8F25001F64F1 /* db.SQLCipher3 in Resources */, @@ -1100,6 +1197,7 @@ isa = PBXResourcesBuildPhase; buildActionMask = 2147483647; files = ( + 567B231B2A29BFE100C61174 /* Issue1383.sqlite in Resources */, 5641A06B24A540A1004967E1 /* InflectionsTests.json in Resources */, 564A2151226B8E18001F64F1 /* Betty.jpeg in Resources */, 564A215A226C8F25001F64F1 /* db.SQLCipher3 in Resources */, @@ -1197,6 +1295,7 @@ 5641A0A424A540A1004967E1 /* CGFloatTests.swift in Sources */, 5641A0F824A540A1004967E1 /* RecordPrimaryKeyNoneTests.swift in Sources */, 5641A01C24A540A1004967E1 /* PrefixCursorTests.swift in Sources */, + 568C3F8D2A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 56419FEE24A540A1004967E1 /* DatabasePoolCollationTests.swift in Sources */, 5641A15424A540A2004967E1 /* DataMemoryTests.swift in Sources */, 5641A0D224A540A1004967E1 /* AssociationHasOneThroughSQLTests.swift in Sources */, @@ -1206,11 +1305,14 @@ 5641A17C24A540C7004967E1 /* Prefix.swift in Sources */, 5641A17E24A540C7004967E1 /* Map.swift in Sources */, 5641A0F024A540A1004967E1 /* FTS4RecordTests.swift in Sources */, + 568C3F972A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */, 5641A0BA24A540A1004967E1 /* PrefixWhileCursorTests.swift in Sources */, 5641A0B624A540A1004967E1 /* DatabasePoolTests.swift in Sources */, 5641A0B424A540A1004967E1 /* DatabaseTests.swift in Sources */, + 568C3F822A5AB38300A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 5641A06024A540A1004967E1 /* AssociationChainRowScopesTests.swift in Sources */, 5641A0C624A540A1004967E1 /* FetchableRecordDecodableTests.swift in Sources */, + 567B5C452AD32F5900629622 /* DatabaseDumpTests.swift in Sources */, 5641A03424A540A1004967E1 /* RowFromDictionaryTests.swift in Sources */, 5641A00424A540A1004967E1 /* ValueObservationReadonlyTests.swift in Sources */, 5641A06624A540A1004967E1 /* FetchableRecord+QueryInterfaceRequestTests.swift in Sources */, @@ -1220,6 +1322,7 @@ 5641A0EE24A540A1004967E1 /* TableRecordTests.swift in Sources */, 5641A0F224A540A1004967E1 /* ValueObservationRegionRecordingTests.swift in Sources */, 5641A10E24A540A1004967E1 /* AssociationParallelDecodableRecordTests.swift in Sources */, + 567B5C102AD32A0000629622 /* SingletonRecordTest.swift in Sources */, 5641A08424A540A1004967E1 /* SelectStatementTests.swift in Sources */, 5641A0DC24A540A1004967E1 /* GRDBTestCase.swift in Sources */, 5641A09C24A540A1004967E1 /* DatabasePoolConcurrencyTests.swift in Sources */, @@ -1229,6 +1332,7 @@ 5641A0CA24A540A1004967E1 /* DatabaseUUIDEncodingStrategyTests.swift in Sources */, 565A27C927871FE500659A62 /* BackupTestCase.swift in Sources */, 56419FD624A540A1004967E1 /* DatabaseRegionTests.swift in Sources */, + 567B5C182AD32A0000629622 /* TransactionDateTests.swift in Sources */, 5641A0A824A540A1004967E1 /* JoinSupportTests.swift in Sources */, 5641A0D424A540A1004967E1 /* FoundationUUIDTests.swift in Sources */, 56419FF424A540A1004967E1 /* DatabaseQueueBackupTests.swift in Sources */, @@ -1251,6 +1355,7 @@ 5641A09624A540A1004967E1 /* DatabaseValueConvertibleEscapingTests.swift in Sources */, 5641A0EC24A540A1004967E1 /* AssociationPrefetchingObservationTests.swift in Sources */, 5641A01824A540A1004967E1 /* FoundationDateComponentsTests.swift in Sources */, + 567B5C1A2AD32A0000629622 
/* TableTests.swift in Sources */, 5641A01E24A540A1004967E1 /* DatabaseDateEncodingStrategyTests.swift in Sources */, 56419FCE24A540A1004967E1 /* AssociationBelongsToSQLDerivationTests.swift in Sources */, 56F61DF6283D4AB100AF9884 /* getThreadsCount.c in Sources */, @@ -1258,14 +1363,17 @@ 5641A02224A540A1004967E1 /* ValueObservationFetchTests.swift in Sources */, 5641A12624A540A1004967E1 /* DatabasePoolSchemaCacheTests.swift in Sources */, 5641A13024A540A1004967E1 /* AssociationBelongsToFetchableRecordTests.swift in Sources */, + 568C3F8F2A5AB3A800A2309D /* FoundationDecimalTests.swift in Sources */, 56419FFE24A540A1004967E1 /* FTS5TokenizerTests.swift in Sources */, 5641A0FA24A540A1004967E1 /* FetchableRecordTests.swift in Sources */, 5641A09424A540A1004967E1 /* ValueObservationCountTests.swift in Sources */, 5641A0EA24A540A1004967E1 /* RowFromDictionaryLiteralTests.swift in Sources */, 5641A09224A540A1004967E1 /* AssociationHasOneThroughDecodableRecordTests.swift in Sources */, 5641A04E24A540A1004967E1 /* RecordUniqueIndexTests.swift in Sources */, + 568C3F912A5AB3A800A2309D /* CommonTableExpressionTests.swift in Sources */, 5641A12824A540A1004967E1 /* FilterCursorTests.swift in Sources */, 5641A0CE24A540A1004967E1 /* CompilationSubClassTests.swift in Sources */, + 568C3F952A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift in Sources */, 5641A0DE24A540A1004967E1 /* QueryInterfaceExpressionsTests.swift in Sources */, 5641A0C424A540A1004967E1 /* MapCursorTests.swift in Sources */, 5641A06E24A540A1004967E1 /* AssociationRowScopeSearchTests.swift in Sources */, @@ -1277,6 +1385,7 @@ 5641A01224A540A1004967E1 /* AssociationPrefetchingFetchableRecordTests.swift in Sources */, 5641A0E624A540A1004967E1 /* FailureTestCase.swift in Sources */, 56419FD824A540A1004967E1 /* DatabaseRegionObservationTests.swift in Sources */, + 567B5C142AD32A0000629622 /* SQLExpressionIsConstantTests.swift in Sources */, 5641A18024A540C7004967E1 /* Inverted.swift in Sources */, 5641A17224A540C7004967E1 /* RecordingError.swift in Sources */, 5641A09A24A540A1004967E1 /* DatabaseValueConvertibleSubclassTests.swift in Sources */, @@ -1290,6 +1399,7 @@ 5641A03824A540A1004967E1 /* VirtualTableModuleTests.swift in Sources */, 5641A02024A540A1004967E1 /* UpdateStatementTests.swift in Sources */, 5641A0E424A540A1004967E1 /* TransactionObserverSavepointsTests.swift in Sources */, + 5623B6252AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 5641A00C24A540A1004967E1 /* DatabaseValueConversionErrorTests.swift in Sources */, 56419FD224A540A1004967E1 /* MutablePersistableRecordEncodableTests.swift in Sources */, 56419FD024A540A1004967E1 /* MutablePersistableRecordTests.swift in Sources */, @@ -1301,6 +1411,7 @@ 5641A08224A540A1004967E1 /* AssociationAggregateTests.swift in Sources */, 5641A12424A540A1004967E1 /* DatabaseTraceTests.swift in Sources */, 56419FE224A540A1004967E1 /* AssociationBelongsToRowScopeTests.swift in Sources */, + 561F38E12AC891890051EEE9 /* JSONExpressionsTests.swift in Sources */, 5641A07624A540A1004967E1 /* CompilationProtocolTests.swift in Sources */, 5641A14624A540A2004967E1 /* DatabaseValueConvertibleFetchTests.swift in Sources */, 5641A03C24A540A1004967E1 /* AssociationBelongsToSQLTests.swift in Sources */, @@ -1311,6 +1422,7 @@ 5641A0F424A540A1004967E1 /* ColumnInfoTests.swift in Sources */, 5641A07A24A540A1004967E1 /* QueryInterfacePromiseTests.swift in Sources */, 5641A0E824A540A1004967E1 /* AssociationParallelSQLTests.swift in Sources */, + 561F39012AC9CE870051EEE9 /* 
DatabaseDataDecodingStrategyTests.swift in Sources */, 56419FE024A540A1004967E1 /* AssociationChainSQLTests.swift in Sources */, 5641A03E24A540A1004967E1 /* InflectionsTests.swift in Sources */, 5641A06224A540A1004967E1 /* DropWhileCursorTests.swift in Sources */, @@ -1320,6 +1432,7 @@ 5641A05024A540A1004967E1 /* FTS3PatternTests.swift in Sources */, 5641A17024A540C7004967E1 /* Recorder.swift in Sources */, 5641A11024A540A1004967E1 /* Row+FoundationTests.swift in Sources */, + 568C3F932A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift in Sources */, 56419FC824A540A1004967E1 /* FetchRequestTests.swift in Sources */, 56419FDC24A540A1004967E1 /* Record+QueryInterfaceRequestTests.swift in Sources */, 5641A0B224A540A1004967E1 /* ForeignKeyInfoTests.swift in Sources */, @@ -1333,10 +1446,12 @@ 5641A0E024A540A1004967E1 /* QueryInterfaceExtensibilityTests.swift in Sources */, 56419FDA24A540A1004967E1 /* TableRecordUpdateTests.swift in Sources */, 5641A01024A540A1004967E1 /* RecordEditedTests.swift in Sources */, + 567B5C472AD32F5900629622 /* DatabaseReaderDumpTests.swift in Sources */, 5641A0A624A540A1004967E1 /* AssociationHasManySQLTests.swift in Sources */, 56419FFC24A540A1004967E1 /* QueryInterfaceRequestTests.swift in Sources */, 5641A0CC24A540A1004967E1 /* DatabaseQueueConcurrencyTests.swift in Sources */, 5641A08C24A540A1004967E1 /* TableDefinitionTests.swift in Sources */, + 567B5C162AD32A0000629622 /* SQLIdentifyingColumnsTests.swift in Sources */, 5641A15624A540A2004967E1 /* DatabaseQueueInMemoryTests.swift in Sources */, 56419FEA24A540A1004967E1 /* RecordPrimaryKeyMultipleTests.swift in Sources */, 5641A10224A540A1004967E1 /* AssociationPrefetchingCodableRecordTests.swift in Sources */, @@ -1346,16 +1461,21 @@ 5641A0B024A540A1004967E1 /* AssociationHasOneThroughRowScopeTests.swift in Sources */, 5641A00A24A540A1004967E1 /* AssociationPrefetchingRowTests.swift in Sources */, 5641A0B824A540A1004967E1 /* FTS5CustomTokenizerTests.swift in Sources */, + 568C3F8B2A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift in Sources */, 5641A04824A540A1004967E1 /* DatabaseValueConversionTests.swift in Sources */, 5641A07824A540A1004967E1 /* DropFirstCursorTests.swift in Sources */, 5641A13224A540A1004967E1 /* FoundationNSDateTests.swift in Sources */, 56419FE624A540A1004967E1 /* RecordMinimalPrimaryKeyRowIDTests.swift in Sources */, 5641A04424A540A1004967E1 /* FTS3TokenizerTests.swift in Sources */, 5641A14024A540A2004967E1 /* RecordPrimaryKeyRowIDTests.swift in Sources */, + 561F38FF2AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, + 56071DE22BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift in Sources */, 5641A08A24A540A1004967E1 /* EncryptionTests.swift in Sources */, 5641A11E24A540A1004967E1 /* IndexInfoTests.swift in Sources */, 56419FEC24A540A1004967E1 /* FTS4TableBuilderTests.swift in Sources */, + 567B5C122AD32A0000629622 /* SharedValueObservationTests.swift in Sources */, 5641A18A24A540C7004967E1 /* DatabaseRegionObservationPublisherTests.swift in Sources */, + 5623B6232AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */, 5641A05C24A540A1004967E1 /* ValueObservationRecordTests.swift in Sources */, 5641A18424A540C7004967E1 /* DatabaseWriterWritePublisherTests.swift in Sources */, 5641A00024A540A1004967E1 /* StatementArguments+FoundationTests.swift in Sources */, @@ -1386,6 +1506,7 @@ 5641A05624A540A1004967E1 /* AssociationPrefetchingSQLTests.swift in Sources */, 5641A12224A540A1004967E1 /* FTS5PatternTests.swift in Sources */, 
5641A0F624A540A1004967E1 /* FTS5WrapperTokenizerTests.swift in Sources */, + 561F38E32AC891890051EEE9 /* JSONColumnTests.swift in Sources */, 5641A15C24A540A2004967E1 /* AssociationHasOneSQLDerivationTests.swift in Sources */, 5641A03024A540A1004967E1 /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */, 5641A00224A540A1004967E1 /* PersistableRecordTests.swift in Sources */, @@ -1422,6 +1543,7 @@ 5641A0A524A540A1004967E1 /* CGFloatTests.swift in Sources */, 5641A0F924A540A1004967E1 /* RecordPrimaryKeyNoneTests.swift in Sources */, 5641A01D24A540A1004967E1 /* PrefixCursorTests.swift in Sources */, + 568C3F8E2A5AB3A800A2309D /* DatabaseColumnEncodingStrategyTests.swift in Sources */, 56419FEF24A540A1004967E1 /* DatabasePoolCollationTests.swift in Sources */, 5641A15524A540A2004967E1 /* DataMemoryTests.swift in Sources */, 5641A0D324A540A1004967E1 /* AssociationHasOneThroughSQLTests.swift in Sources */, @@ -1431,11 +1553,14 @@ 5641A17D24A540C7004967E1 /* Prefix.swift in Sources */, 5641A17F24A540C7004967E1 /* Map.swift in Sources */, 5641A0F124A540A1004967E1 /* FTS4RecordTests.swift in Sources */, + 568C3F982A5AB3A800A2309D /* RecordMinimalNonOptionalPrimaryKeySingleTests.swift in Sources */, 5641A0BB24A540A1004967E1 /* PrefixWhileCursorTests.swift in Sources */, 5641A0B724A540A1004967E1 /* DatabasePoolTests.swift in Sources */, 5641A0B524A540A1004967E1 /* DatabaseTests.swift in Sources */, + 568C3F832A5AB38300A2309D /* ForeignKeyDefinitionTests.swift in Sources */, 5641A06124A540A1004967E1 /* AssociationChainRowScopesTests.swift in Sources */, 5641A0C724A540A1004967E1 /* FetchableRecordDecodableTests.swift in Sources */, + 567B5C462AD32F5900629622 /* DatabaseDumpTests.swift in Sources */, 5641A03524A540A1004967E1 /* RowFromDictionaryTests.swift in Sources */, 5641A00524A540A1004967E1 /* ValueObservationReadonlyTests.swift in Sources */, 5641A06724A540A1004967E1 /* FetchableRecord+QueryInterfaceRequestTests.swift in Sources */, @@ -1445,6 +1570,7 @@ 5641A0EF24A540A1004967E1 /* TableRecordTests.swift in Sources */, 5641A0F324A540A1004967E1 /* ValueObservationRegionRecordingTests.swift in Sources */, 5641A10F24A540A1004967E1 /* AssociationParallelDecodableRecordTests.swift in Sources */, + 567B5C112AD32A0000629622 /* SingletonRecordTest.swift in Sources */, 5641A08524A540A1004967E1 /* SelectStatementTests.swift in Sources */, 5641A0DD24A540A1004967E1 /* GRDBTestCase.swift in Sources */, 5641A09D24A540A1004967E1 /* DatabasePoolConcurrencyTests.swift in Sources */, @@ -1454,6 +1580,7 @@ 5641A0CB24A540A1004967E1 /* DatabaseUUIDEncodingStrategyTests.swift in Sources */, 565A27CA27871FE500659A62 /* BackupTestCase.swift in Sources */, 56419FD724A540A1004967E1 /* DatabaseRegionTests.swift in Sources */, + 567B5C192AD32A0000629622 /* TransactionDateTests.swift in Sources */, 5641A0A924A540A1004967E1 /* JoinSupportTests.swift in Sources */, 5641A0D524A540A1004967E1 /* FoundationUUIDTests.swift in Sources */, 56419FF524A540A1004967E1 /* DatabaseQueueBackupTests.swift in Sources */, @@ -1476,6 +1603,7 @@ 5641A09724A540A1004967E1 /* DatabaseValueConvertibleEscapingTests.swift in Sources */, 5641A0ED24A540A1004967E1 /* AssociationPrefetchingObservationTests.swift in Sources */, 5641A01924A540A1004967E1 /* FoundationDateComponentsTests.swift in Sources */, + 567B5C1B2AD32A0000629622 /* TableTests.swift in Sources */, 5641A01F24A540A1004967E1 /* DatabaseDateEncodingStrategyTests.swift in Sources */, 56419FCF24A540A1004967E1 /* AssociationBelongsToSQLDerivationTests.swift in Sources 
*/, 56F61DF7283D4AB100AF9884 /* getThreadsCount.c in Sources */, @@ -1483,14 +1611,17 @@ 5641A02324A540A1004967E1 /* ValueObservationFetchTests.swift in Sources */, 5641A12724A540A1004967E1 /* DatabasePoolSchemaCacheTests.swift in Sources */, 5641A13124A540A1004967E1 /* AssociationBelongsToFetchableRecordTests.swift in Sources */, + 568C3F902A5AB3A800A2309D /* FoundationDecimalTests.swift in Sources */, 56419FFF24A540A1004967E1 /* FTS5TokenizerTests.swift in Sources */, 5641A0FB24A540A1004967E1 /* FetchableRecordTests.swift in Sources */, 5641A09524A540A1004967E1 /* ValueObservationCountTests.swift in Sources */, 5641A0EB24A540A1004967E1 /* RowFromDictionaryLiteralTests.swift in Sources */, 5641A09324A540A1004967E1 /* AssociationHasOneThroughDecodableRecordTests.swift in Sources */, 5641A04F24A540A1004967E1 /* RecordUniqueIndexTests.swift in Sources */, + 568C3F922A5AB3A800A2309D /* CommonTableExpressionTests.swift in Sources */, 5641A12924A540A1004967E1 /* FilterCursorTests.swift in Sources */, 5641A0CF24A540A1004967E1 /* CompilationSubClassTests.swift in Sources */, + 568C3F962A5AB3A800A2309D /* DatabaseSnapshotPoolTests.swift in Sources */, 5641A0DF24A540A1004967E1 /* QueryInterfaceExpressionsTests.swift in Sources */, 5641A0C524A540A1004967E1 /* MapCursorTests.swift in Sources */, 5641A06F24A540A1004967E1 /* AssociationRowScopeSearchTests.swift in Sources */, @@ -1502,6 +1633,7 @@ 5641A01324A540A1004967E1 /* AssociationPrefetchingFetchableRecordTests.swift in Sources */, 5641A0E724A540A1004967E1 /* FailureTestCase.swift in Sources */, 56419FD924A540A1004967E1 /* DatabaseRegionObservationTests.swift in Sources */, + 567B5C152AD32A0000629622 /* SQLExpressionIsConstantTests.swift in Sources */, 5641A18124A540C7004967E1 /* Inverted.swift in Sources */, 5641A17324A540C7004967E1 /* RecordingError.swift in Sources */, 5641A09B24A540A1004967E1 /* DatabaseValueConvertibleSubclassTests.swift in Sources */, @@ -1515,6 +1647,7 @@ 5641A03924A540A1004967E1 /* VirtualTableModuleTests.swift in Sources */, 5641A02124A540A1004967E1 /* UpdateStatementTests.swift in Sources */, 5641A0E524A540A1004967E1 /* TransactionObserverSavepointsTests.swift in Sources */, + 5623B6262AED3A2200436239 /* DatabaseQueueTemporaryCopyTests.swift in Sources */, 5641A00D24A540A1004967E1 /* DatabaseValueConversionErrorTests.swift in Sources */, 56419FD324A540A1004967E1 /* MutablePersistableRecordEncodableTests.swift in Sources */, 56419FD124A540A1004967E1 /* MutablePersistableRecordTests.swift in Sources */, @@ -1526,6 +1659,7 @@ 5641A08324A540A1004967E1 /* AssociationAggregateTests.swift in Sources */, 5641A12524A540A1004967E1 /* DatabaseTraceTests.swift in Sources */, 56419FE324A540A1004967E1 /* AssociationBelongsToRowScopeTests.swift in Sources */, + 561F38E22AC891890051EEE9 /* JSONExpressionsTests.swift in Sources */, 5641A07724A540A1004967E1 /* CompilationProtocolTests.swift in Sources */, 5641A14724A540A2004967E1 /* DatabaseValueConvertibleFetchTests.swift in Sources */, 5641A03D24A540A1004967E1 /* AssociationBelongsToSQLTests.swift in Sources */, @@ -1536,6 +1670,7 @@ 5641A0F524A540A1004967E1 /* ColumnInfoTests.swift in Sources */, 5641A07B24A540A1004967E1 /* QueryInterfacePromiseTests.swift in Sources */, 5641A0E924A540A1004967E1 /* AssociationParallelSQLTests.swift in Sources */, + 561F39022AC9CE870051EEE9 /* DatabaseDataDecodingStrategyTests.swift in Sources */, 56419FE124A540A1004967E1 /* AssociationChainSQLTests.swift in Sources */, 5641A03F24A540A1004967E1 /* InflectionsTests.swift in Sources */, 
5641A06324A540A1004967E1 /* DropWhileCursorTests.swift in Sources */, @@ -1545,6 +1680,7 @@ 5641A05124A540A1004967E1 /* FTS3PatternTests.swift in Sources */, 5641A17124A540C7004967E1 /* Recorder.swift in Sources */, 5641A11124A540A1004967E1 /* Row+FoundationTests.swift in Sources */, + 568C3F942A5AB3A800A2309D /* CaseInsensitiveIdentifierTests.swift in Sources */, 56419FC924A540A1004967E1 /* FetchRequestTests.swift in Sources */, 56419FDD24A540A1004967E1 /* Record+QueryInterfaceRequestTests.swift in Sources */, 5641A0B324A540A1004967E1 /* ForeignKeyInfoTests.swift in Sources */, @@ -1558,10 +1694,12 @@ 5641A0E124A540A1004967E1 /* QueryInterfaceExtensibilityTests.swift in Sources */, 56419FDB24A540A1004967E1 /* TableRecordUpdateTests.swift in Sources */, 5641A01124A540A1004967E1 /* RecordEditedTests.swift in Sources */, + 567B5C482AD32F5900629622 /* DatabaseReaderDumpTests.swift in Sources */, 5641A0A724A540A1004967E1 /* AssociationHasManySQLTests.swift in Sources */, 56419FFD24A540A1004967E1 /* QueryInterfaceRequestTests.swift in Sources */, 5641A0CD24A540A1004967E1 /* DatabaseQueueConcurrencyTests.swift in Sources */, 5641A08D24A540A1004967E1 /* TableDefinitionTests.swift in Sources */, + 567B5C172AD32A0000629622 /* SQLIdentifyingColumnsTests.swift in Sources */, 5641A15724A540A2004967E1 /* DatabaseQueueInMemoryTests.swift in Sources */, 56419FEB24A540A1004967E1 /* RecordPrimaryKeyMultipleTests.swift in Sources */, 5641A10324A540A1004967E1 /* AssociationPrefetchingCodableRecordTests.swift in Sources */, @@ -1571,16 +1709,21 @@ 5641A0B124A540A1004967E1 /* AssociationHasOneThroughRowScopeTests.swift in Sources */, 5641A00B24A540A1004967E1 /* AssociationPrefetchingRowTests.swift in Sources */, 5641A0B924A540A1004967E1 /* FTS5CustomTokenizerTests.swift in Sources */, + 568C3F8C2A5AB3A800A2309D /* AssociationPrefetchingRelationTests.swift in Sources */, 5641A04924A540A1004967E1 /* DatabaseValueConversionTests.swift in Sources */, 5641A07924A540A1004967E1 /* DropFirstCursorTests.swift in Sources */, 5641A13324A540A1004967E1 /* FoundationNSDateTests.swift in Sources */, 56419FE724A540A1004967E1 /* RecordMinimalPrimaryKeyRowIDTests.swift in Sources */, 5641A04524A540A1004967E1 /* FTS3TokenizerTests.swift in Sources */, 5641A14124A540A2004967E1 /* RecordPrimaryKeyRowIDTests.swift in Sources */, + 561F39002AC9CE870051EEE9 /* DatabaseDataEncodingStrategyTests.swift in Sources */, + 56071DE32BD3DDA5000802B6 /* SingletonUserDefaultsTest.swift in Sources */, 5641A08B24A540A1004967E1 /* EncryptionTests.swift in Sources */, 5641A11F24A540A1004967E1 /* IndexInfoTests.swift in Sources */, 56419FED24A540A1004967E1 /* FTS4TableBuilderTests.swift in Sources */, + 567B5C132AD32A0000629622 /* SharedValueObservationTests.swift in Sources */, 5641A18B24A540C7004967E1 /* DatabaseRegionObservationPublisherTests.swift in Sources */, + 5623B6242AED3A2200436239 /* DatabaseQueueInMemoryCopyTests.swift in Sources */, 5641A05D24A540A1004967E1 /* ValueObservationRecordTests.swift in Sources */, 5641A18524A540C7004967E1 /* DatabaseWriterWritePublisherTests.swift in Sources */, 5641A00124A540A1004967E1 /* StatementArguments+FoundationTests.swift in Sources */, @@ -1611,6 +1754,7 @@ 5641A05724A540A1004967E1 /* AssociationPrefetchingSQLTests.swift in Sources */, 5641A12324A540A1004967E1 /* FTS5PatternTests.swift in Sources */, 5641A0F724A540A1004967E1 /* FTS5WrapperTokenizerTests.swift in Sources */, + 561F38E42AC891890051EEE9 /* JSONColumnTests.swift in Sources */, 5641A15D24A540A2004967E1 /* 
AssociationHasOneSQLDerivationTests.swift in Sources */, 5641A03124A540A1004967E1 /* MutablePersistableRecordPersistenceConflictPolicyTests.swift in Sources */, 5641A00324A540A1004967E1 /* PersistableRecordTests.swift in Sources */, diff --git a/Tests/CocoaPods/SQLCipher4/Podfile b/Tests/CocoaPods/SQLCipher4/Podfile index d8cd6ce44c..031d49b72e 100644 --- a/Tests/CocoaPods/SQLCipher4/Podfile +++ b/Tests/CocoaPods/SQLCipher4/Podfile @@ -17,7 +17,27 @@ end post_install do |installer| installer.pods_project.targets.each do |target| target.build_configurations.each do |config| + # Workaround for Xcode 14.3+ + # https://github.com/CocoaPods/CocoaPods/issues/11839 + config.build_settings['MACOSX_DEPLOYMENT_TARGET'] = '10.13' config.build_settings['GCC_OPTIMIZATION_LEVEL'] = '3' end end + + # TODO: remove when https://github.com/CocoaPods/CocoaPods/pull/12009 is merged. + # https://github.com/CocoaPods/CocoaPods/issues/12012#issuecomment-1655191516 + installer.aggregate_targets.each do |target| + target.xcconfigs.each do |variant, xcconfig| + xcconfig_path = target.client_root + target.xcconfig_relative_path(variant) + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + installer.pods_project.targets.each do |target| + target.build_configurations.each do |config| + if config.base_configuration_reference.is_a? Xcodeproj::Project::Object::PBXFileReference + xcconfig_path = config.base_configuration_reference.real_path + IO.write(xcconfig_path, IO.read(xcconfig_path).gsub("DT_TOOLCHAIN_DIR", "TOOLCHAIN_DIR")) + end + end + end end diff --git a/Tests/CombineExpectations/PublisherExpectations/AvailableElements.swift b/Tests/CombineExpectations/PublisherExpectations/AvailableElements.swift index 82947bcca5..402b3f5535 100644 --- a/Tests/CombineExpectations/PublisherExpectations/AvailableElements.swift +++ b/Tests/CombineExpectations/PublisherExpectations/AvailableElements.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the timeout to expire, or /// the recorded publisher to complete. diff --git a/Tests/CombineExpectations/PublisherExpectations/Finished.swift b/Tests/CombineExpectations/PublisherExpectations/Finished.swift index 39fa11bfa0..0a47d09f62 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Finished.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Finished.swift @@ -17,7 +17,7 @@ import XCTest // try wait(for: recorder.finished.inverted, timeout: 1) // } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the recorded publisher /// to complete. diff --git a/Tests/CombineExpectations/PublisherExpectations/Inverted.swift b/Tests/CombineExpectations/PublisherExpectations/Inverted.swift index 076611d340..c7f8d72cc3 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Inverted.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Inverted.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation that fails if the base expectation is fulfilled. 
/// diff --git a/Tests/CombineExpectations/PublisherExpectations/Map.swift b/Tests/CombineExpectations/PublisherExpectations/Map.swift index 087efbd336..ab4f95c733 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Map.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Map.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation that transforms the value of a base expectation. /// @@ -20,7 +20,7 @@ extension PublisherExpectations { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectation { /// Returns a publisher expectation that transforms the value of the /// base expectation. diff --git a/Tests/CombineExpectations/PublisherExpectations/Next.swift b/Tests/CombineExpectations/PublisherExpectations/Next.swift index 937e3b4271..76ad0c1055 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Next.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Next.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the recorded publisher to emit /// `count` elements, or to complete. diff --git a/Tests/CombineExpectations/PublisherExpectations/NextOne.swift b/Tests/CombineExpectations/PublisherExpectations/NextOne.swift index 6333b6b205..84ee6233e5 100644 --- a/Tests/CombineExpectations/PublisherExpectations/NextOne.swift +++ b/Tests/CombineExpectations/PublisherExpectations/NextOne.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the recorded publisher to emit /// one element, or to complete. diff --git a/Tests/CombineExpectations/PublisherExpectations/Prefix.swift b/Tests/CombineExpectations/PublisherExpectations/Prefix.swift index 2740508988..11aced69da 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Prefix.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Prefix.swift @@ -1,7 +1,7 @@ #if canImport(Combine) import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the recorded publisher to emit /// `maxLength` elements, or to complete. diff --git a/Tests/CombineExpectations/PublisherExpectations/Recording.swift b/Tests/CombineExpectations/PublisherExpectations/Recording.swift index 97d76d394c..0b95292dc0 100644 --- a/Tests/CombineExpectations/PublisherExpectations/Recording.swift +++ b/Tests/CombineExpectations/PublisherExpectations/Recording.swift @@ -2,7 +2,7 @@ import Combine import XCTest -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// A publisher expectation which waits for the recorded publisher /// to complete. 
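Note on the @available rewrites above: OSX is the legacy platform name in Swift's @available attribute and macOS its current spelling; the compiler accepts both, and these changes standardize on macOS with the platforms listed alphabetically. A minimal sketch of the two equivalent forms (the function names below are invented for illustration, not part of this patch):

    // Legacy spelling, still accepted by the compiler:
    @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *)
    func usesLegacyPlatformName() {}

    // Current spelling, as adopted throughout these files:
    @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *)
    func usesCurrentPlatformName() {}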
@@ -49,7 +49,7 @@ extension PublisherExpectations { /// } public func get() throws -> Record<Input, Failure>.Recording { try recorder.value { (elements, completion, remainingElements, consume) in - if let completion = completion { + if let completion { consume(remainingElements.count) return Record<Input, Failure>.Recording(output: elements, completion: completion) } else { diff --git a/Tests/CombineExpectations/Recorder.swift b/Tests/CombineExpectations/Recorder.swift index 6b91f405ca..0d7031aaf1 100644 --- a/Tests/CombineExpectations/Recorder.swift +++ b/Tests/CombineExpectations/Recorder.swift @@ -13,7 +13,7 @@ import XCTest /// /// let elements = try wait(for: recorder.elements, timeout: 1) /// XCTAssertEqual(elements, ["foo", "bar", "baz"]) -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public class Recorder<Input, Failure: Error>: Subscriber { public typealias Input = Input public typealias Failure = Failure @@ -106,53 +106,59 @@ public class Recorder<Input, Failure: Error>: Subscriber { /// the expectation. For example, the Prefix expectation uses true, but /// the NextOne expectation uses false. func fulfillOnInput(_ expectation: XCTestExpectation, includingConsumed: Bool) { - synchronized { - preconditionCanFulfillExpectation() + lock.lock() + + preconditionCanFulfillExpectation() + + let expectedFulfillmentCount = expectation.expectedFulfillmentCount + + switch state { + case .waitingForSubscription: + let exp = RecorderExpectation.onInput(expectation, remainingCount: expectedFulfillmentCount) + state = .waitingForSubscription(exp) + lock.unlock() - let expectedFulfillmentCount = expectation.expectedFulfillmentCount + case let .subscribed(subscription, _, elements): + let maxFulfillmentCount = includingConsumed + ? elements.count + : elements.count - consumedCount + let fulfillmentCount = min(expectedFulfillmentCount, maxFulfillmentCount) - switch state { - case .waitingForSubscription: - let exp = RecorderExpectation.onInput(expectation, remainingCount: expectedFulfillmentCount) - state = .waitingForSubscription(exp) - - case let .subscribed(subscription, _, elements): - let maxFulfillmentCount = includingConsumed - ? elements.count - : elements.count - consumedCount - let fulfillmentCount = min(expectedFulfillmentCount, maxFulfillmentCount) - expectation.fulfill(count: fulfillmentCount) - - let remainingCount = expectedFulfillmentCount - fulfillmentCount - if remainingCount > 0 { - let exp = RecorderExpectation.onInput(expectation, remainingCount: remainingCount) - state = .subscribed(subscription, exp, elements) - } - - case .completed: - expectation.fulfill(count: expectedFulfillmentCount) + let remainingCount = expectedFulfillmentCount - fulfillmentCount + if remainingCount > 0 { + let exp = RecorderExpectation.onInput(expectation, remainingCount: remainingCount) + state = .subscribed(subscription, exp, elements) } + lock.unlock() + expectation.fulfill(count: fulfillmentCount) + + case .completed: + lock.unlock() + expectation.fulfill(count: expectedFulfillmentCount) } } /// Registers the expectation so that it gets fulfilled when /// publisher completes.
func fulfillOnCompletion(_ expectation: XCTestExpectation) { - synchronized { - preconditionCanFulfillExpectation() + lock.lock() + + preconditionCanFulfillExpectation() + + switch state { + case .waitingForSubscription: + let exp = RecorderExpectation.onCompletion(expectation) + state = .waitingForSubscription(exp) + lock.unlock() - switch state { - case .waitingForSubscription: - let exp = RecorderExpectation.onCompletion(expectation) - state = .waitingForSubscription(exp) - - case let .subscribed(subscription, _, elements): - let exp = RecorderExpectation.onCompletion(expectation) - state = .subscribed(subscription, exp, elements) - - case .completed: - expectation.fulfill() - } + case let .subscribed(subscription, _, elements): + let exp = RecorderExpectation.onCompletion(expectation) + state = .subscribed(subscription, exp, elements) + lock.unlock() + + case .completed: + lock.unlock() + expectation.fulfill() } } @@ -171,7 +177,7 @@ public class Recorder: Subscriber { _ completion: Subscribers.Completion?, _ remainingElements: ArraySlice, _ consume: (_ count: Int) -> ()) throws -> T) - rethrows -> T + rethrows -> T { try synchronized { let (elements, completion) = state.elementsAndCompletion @@ -217,65 +223,71 @@ public class Recorder: Subscriber { } public func receive(_ input: Input) -> Subscribers.Demand { - return synchronized { - switch state { - case let .subscribed(subscription, exp, elements): - var elements = elements - elements.append(input) - - if case let .onInput(expectation, remainingCount: remainingCount) = exp { - assert(remainingCount > 0) - expectation.fulfill() - if remainingCount > 1 { - let exp = RecorderExpectation.onInput(expectation, remainingCount: remainingCount - 1) - state = .subscribed(subscription, exp, elements) - } else { - state = .subscribed(subscription, nil, elements) - } - } else { + lock.lock() + + switch state { + case let .subscribed(subscription, exp, elements): + var elements = elements + elements.append(input) + + if case let .onInput(expectation, remainingCount: remainingCount) = exp { + assert(remainingCount > 0) + expectation.fulfill() + if remainingCount > 1 { + let exp = RecorderExpectation.onInput(expectation, remainingCount: remainingCount - 1) state = .subscribed(subscription, exp, elements) + } else { + state = .subscribed(subscription, nil, elements) } - - return .unlimited - - case .waitingForSubscription: - XCTFail("Publisher recorder got unexpected input before subscription: \(String(reflecting: input))") - return .none - - case .completed: - XCTFail("Publisher recorder got unexpected input after completion: \(String(reflecting: input))") - return .none + } else { + state = .subscribed(subscription, exp, elements) } + + lock.unlock() + return .unlimited + + case .waitingForSubscription: + lock.unlock() + XCTFail("Publisher recorder got unexpected input before subscription: \(String(reflecting: input))") + return .none + + case .completed: + lock.unlock() + XCTFail("Publisher recorder got unexpected input after completion: \(String(reflecting: input))") + return .none } } public func receive(completion: Subscribers.Completion) { - synchronized { - switch state { - case let .subscribed(_, exp, elements): - if let exp = exp { - switch exp { - case let .onCompletion(expectation): - expectation.fulfill() - case let .onInput(expectation, remainingCount: remainingCount): - expectation.fulfill(count: remainingCount) - } + lock.lock() + + switch state { + case let .subscribed(_, exp, elements): + if let exp { + switch exp { + case let 
.onCompletion(expectation): + expectation.fulfill() + case let .onInput(expectation, remainingCount: remainingCount): + expectation.fulfill(count: remainingCount) } - state = .completed(elements, completion) - - case .waitingForSubscription: - XCTFail("Publisher recorder got unexpected completion before subscription: \(String(describing: completion))") - - case .completed: - XCTFail("Publisher recorder got unexpected completion after completion: \(String(describing: completion))") } + state = .completed(elements, completion) + lock.unlock() + + case .waitingForSubscription: + lock.unlock() + XCTFail("Publisher recorder got unexpected completion before subscription: \(String(describing: completion))") + + case .completed: + lock.unlock() + XCTFail("Publisher recorder got unexpected completion after completion: \(String(describing: completion))") } } } // MARK: - Publisher Expectations -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension PublisherExpectations { /// The type of the publisher expectation returned by `Recorder.completion`. public typealias Completion = Map, Subscribers.Completion> @@ -290,7 +302,7 @@ extension PublisherExpectations { public typealias Single = Map, Input> } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Recorder { /// Returns a publisher expectation which waits for the timeout to expire, /// or the recorded publisher to complete. @@ -572,7 +584,7 @@ extension Recorder { // MARK: - Publisher + Recorder -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Publisher { /// Returns a subscribed Recorder. /// diff --git a/Tests/GRDBCombineTests/DatabaseReaderReadPublisherTests.swift b/Tests/GRDBCombineTests/DatabaseReaderReadPublisherTests.swift index 1f002f238a..2b1cf9394f 100644 --- a/Tests/GRDBCombineTests/DatabaseReaderReadPublisherTests.swift +++ b/Tests/GRDBCombineTests/DatabaseReaderReadPublisherTests.swift @@ -22,7 +22,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // MARK: - func testReadPublisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -128,7 +128,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // frame #71: 0x00007fff72311cc9 libdyld.dylib`start + 1 // frame #72: 0x00007fff72311cc9 libdyld.dylib`start + 1 func testReadPublisherError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -157,7 +157,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // MARK: - func testReadPublisherIsAsynchronous() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -197,7 +197,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // MARK: - func testReadPublisherDefaultScheduler() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -237,7 +237,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // MARK: - func testReadPublisherCustomScheduler() throws { - 
guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -278,7 +278,7 @@ class DatabaseReaderReadPublisherTests : XCTestCase { // MARK: - func testReadPublisherIsReadonly() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } diff --git a/Tests/GRDBCombineTests/DatabaseRegionObservationPublisherTests.swift b/Tests/GRDBCombineTests/DatabaseRegionObservationPublisherTests.swift index a870a00669..e126c54b39 100644 --- a/Tests/GRDBCombineTests/DatabaseRegionObservationPublisherTests.swift +++ b/Tests/GRDBCombineTests/DatabaseRegionObservationPublisherTests.swift @@ -20,7 +20,7 @@ private struct Player: Codable, FetchableRecord, PersistableRecord { class DatabaseRegionObservationPublisherTests : XCTestCase { func testChangesNotifications() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -61,7 +61,7 @@ class DatabaseRegionObservationPublisherTests : XCTestCase { // TODO: do the same, but asynchronously. If this is too hard, update the // public API so that users can easily do it. func testPrependInitialDatabaseSync() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } diff --git a/Tests/GRDBCombineTests/DatabaseWriterWritePublisherTests.swift b/Tests/GRDBCombineTests/DatabaseWriterWritePublisherTests.swift index 4f07e7960c..1ab1420818 100644 --- a/Tests/GRDBCombineTests/DatabaseWriterWritePublisherTests.swift +++ b/Tests/GRDBCombineTests/DatabaseWriterWritePublisherTests.swift @@ -22,7 +22,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWritePublisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -49,7 +49,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWritePublisherValue() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -76,7 +76,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWritePublisherError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -99,7 +99,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { } func testWritePublisherErrorRollbacksTransaction() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -132,7 +132,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWritePublisherIsAsynchronous() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -168,7 +168,7 @@ class DatabaseWriterWritePublisherTests : 
XCTestCase { // MARK: - func testWritePublisherDefaultScheduler() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -206,7 +206,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWritePublisherCustomScheduler() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -247,7 +247,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // TODO: Fix flaky test with both pool and on-disk queue: // - Expectation timeout func testWriteThenReadPublisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -274,7 +274,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWriteThenReadPublisherIsReadonly() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -299,7 +299,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // MARK: - func testWriteThenReadPublisherWriteError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -322,7 +322,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { } func testWriteThenReadPublisherWriteErrorRollbacksTransaction() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -359,7 +359,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // TODO: Fix flaky test with both pool and on-disk queue: // - Expectation timeout func testWriteThenReadPublisherReadError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -386,7 +386,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { // Regression test against deadlocks created by concurrent completion // and cancellations triggered by .switchToLatest().prefix(1) func testDeadlockPrevention() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -395,7 +395,7 @@ class DatabaseWriterWritePublisherTests : XCTestCase { return writer } - func test(writer: DatabaseWriter, iteration: Int) throws { + func test(writer: some DatabaseWriter, iteration: Int) throws { // print(iteration) let scoreSubject = PassthroughSubject() let publisher = scoreSubject diff --git a/Tests/GRDBCombineTests/Support.swift b/Tests/GRDBCombineTests/Support.swift index bf9c1d3a7e..d8f2ec8381 100644 --- a/Tests/GRDBCombineTests/Support.swift +++ b/Tests/GRDBCombineTests/Support.swift @@ -51,7 +51,7 @@ final class Test { } } -@available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) final class AsyncTest { // Raise the repeatCount in order to help spotting flaky tests. 
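One change above is easy to miss: `test(writer: DatabaseWriter, iteration: Int)` becomes `test(writer: some DatabaseWriter, iteration: Int)`. In Swift 5.7, `some Protocol` in parameter position is shorthand for an explicit generic parameter, replacing the existential that a bare protocol name used to denote. A sketch under that reading (the function name and SQL are illustrative):

    import GRDB

    // Equivalent to: func insertRow<W: DatabaseWriter>(into writer: W) throws
    func insertRow(into writer: some DatabaseWriter) throws {
        try writer.write { db in
            try db.execute(sql: "INSERT INTO player DEFAULT VALUES")
        }
    }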
private let repeatCount: Int @@ -100,7 +100,7 @@ final class AsyncTest { } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func assertNoFailure( _ completion: Subscribers.Completion, file: StaticString = #file, @@ -111,7 +111,7 @@ public func assertNoFailure( } } -@available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) public func assertFailure( _ completion: Subscribers.Completion, file: StaticString = #file, diff --git a/Tests/GRDBCombineTests/ValueObservationPublisherTests.swift b/Tests/GRDBCombineTests/ValueObservationPublisherTests.swift index eedd4afc3c..b55fd98c6d 100644 --- a/Tests/GRDBCombineTests/ValueObservationPublisherTests.swift +++ b/Tests/GRDBCombineTests/ValueObservationPublisherTests.swift @@ -22,7 +22,7 @@ class ValueObservationPublisherTests : XCTestCase { // MARK: - Default Scheduler func testDefaultSchedulerChangesNotifications() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -64,7 +64,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDefaultSchedulerFirstValueIsEmittedAsynchronously() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -97,7 +97,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDefaultSchedulerError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -123,7 +123,7 @@ class ValueObservationPublisherTests : XCTestCase { // MARK: - Immediate Scheduler func testImmediateSchedulerChangesNotifications() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -165,7 +165,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testImmediateSchedulerEmitsFirstValueSynchronously() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -201,7 +201,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testImmediateSchedulerError() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -226,7 +226,7 @@ class ValueObservationPublisherTests : XCTestCase { // MARK: - Demand - @available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) private class DemandSubscriber: Subscriber { private var subscription: Subscription? 
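The `DemandSubscriber` above exists so the demand tests can control exactly how many elements are requested. A loose sketch of such a subscriber (names are illustrative, not the tests' actual helper): it stores its subscription without requesting anything, then forwards demand only on explicit request.

    import Combine

    final class ManualDemandSubscriber<Input, Failure: Error>: Subscriber {
        private var subscription: Subscription?

        func receive(subscription: Subscription) {
            self.subscription = subscription  // request no demand yet
        }

        func receive(_ input: Input) -> Subscribers.Demand {
            .none  // never ask for more implicitly
        }

        func receive(completion: Subscribers.Completion<Failure>) {}

        // Tests drive delivery by requesting demand explicitly:
        func request(_ demand: Subscribers.Demand) {
            subscription?.request(demand)
        }
    }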
let subject = PassthroughSubject() @@ -257,7 +257,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDemandNoneReceivesNoElement() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -292,7 +292,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDemandOneReceivesOneElement() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -330,7 +330,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDemandOneDoesNotReceiveTwoElements() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -372,7 +372,7 @@ class ValueObservationPublisherTests : XCTestCase { } func testDemandTwoReceivesTwoElements() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -418,7 +418,7 @@ class ValueObservationPublisherTests : XCTestCase { /// Regression test for https://github.com/groue/GRDB.swift/issues/1194 func testIssue1194() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } diff --git a/Tests/GRDBTests/AssociationAggregateTests.swift b/Tests/GRDBTests/AssociationAggregateTests.swift index 8d4333a260..a8875291ae 100644 --- a/Tests/GRDBTests/AssociationAggregateTests.swift +++ b/Tests/GRDBTests/AssociationAggregateTests.swift @@ -51,13 +51,13 @@ class AssociationAggregateTests: GRDBTestCase { } try db.create(table: "player") { t in t.primaryKey("id", .integer) - t.column("teamId", .integer).references("team") + t.belongsTo("team") t.column("name", .text) t.column("score", .integer) } try db.create(table: "award") { t in t.primaryKey("customPrimaryKey", .integer) - t.column("teamId", .integer).references("team") + t.belongsTo("team") t.column("name", .text) } @@ -1511,6 +1511,30 @@ class AssociationAggregateTests: GRDBTestCase { } } + func testCast() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.read { db in + do { + let request = Team.annotated(with: cast(Team.players.count, as: .real)) + try assertEqualSQL(db, request, """ + SELECT "team".*, CAST(COUNT(DISTINCT "player"."id") AS REAL) AS "playerCount" \ + FROM "team" \ + LEFT JOIN "player" ON "player"."teamId" = "team"."id" \ + GROUP BY "team"."id" + """) + } + do { + let request = Team.annotated(with: cast(Team.players.count, as: .real).forKey("foo")) + try assertEqualSQL(db, request, """ + SELECT "team".*, CAST(COUNT(DISTINCT "player"."id") AS REAL) AS "foo" \ + FROM "team" \ + LEFT JOIN "player" ON "player"."teamId" = "team"."id" \ + GROUP BY "team"."id" + """) + } + } + } + func testLength() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.read { db in diff --git a/Tests/GRDBTests/AssociationBelongsToDecodableRecordTests.swift b/Tests/GRDBTests/AssociationBelongsToDecodableRecordTests.swift index 161e8e1485..bdfac08273 100644 --- a/Tests/GRDBTests/AssociationBelongsToDecodableRecordTests.swift +++ b/Tests/GRDBTests/AssociationBelongsToDecodableRecordTests.swift @@ -55,7 +55,7 @@ class 
AssociationBelongsToDecodableRecordTests: GRDBTestCase { } try db.create(table: "players") { t in t.primaryKey("id", .integer) - t.column("teamId", .integer).references("teams") + t.belongsTo("team") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationBelongsToFetchableRecordTests.swift b/Tests/GRDBTests/AssociationBelongsToFetchableRecordTests.swift index 06e63f47ab..e288e6c0cf 100644 --- a/Tests/GRDBTests/AssociationBelongsToFetchableRecordTests.swift +++ b/Tests/GRDBTests/AssociationBelongsToFetchableRecordTests.swift @@ -57,7 +57,7 @@ class AssociationBelongsToFetchableRecordTests: GRDBTestCase { } try db.create(table: "players") { t in t.primaryKey("id", .integer) - t.column("teamId", .integer).references("teams") + t.belongsTo("team") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationBelongsToRowScopeTests.swift b/Tests/GRDBTests/AssociationBelongsToRowScopeTests.swift index b7f826ab01..813a993e27 100644 --- a/Tests/GRDBTests/AssociationBelongsToRowScopeTests.swift +++ b/Tests/GRDBTests/AssociationBelongsToRowScopeTests.swift @@ -27,7 +27,7 @@ class AssociationBelongsToRowScopeTests: GRDBTestCase { } try db.create(table: "players") { t in t.primaryKey("id", .integer) - t.column("teamId", .integer).references("teams") + t.belongsTo("team") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationBelongsToSQLDerivationTests.swift b/Tests/GRDBTests/AssociationBelongsToSQLDerivationTests.swift index 49a81fa251..b51ec961ec 100644 --- a/Tests/GRDBTests/AssociationBelongsToSQLDerivationTests.swift +++ b/Tests/GRDBTests/AssociationBelongsToSQLDerivationTests.swift @@ -16,12 +16,12 @@ private struct B : TableRecord { private struct RestrictedB : TableRecord { static let databaseTableName = "b" - static let databaseSelection: [SQLSelectable] = [Column("name")] + static let databaseSelection: [any SQLSelectable] = [Column("name")] } private struct ExtendedB : TableRecord { static let databaseTableName = "b" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] } /// Test SQL generation diff --git a/Tests/GRDBTests/AssociationBelongsToSQLTests.swift b/Tests/GRDBTests/AssociationBelongsToSQLTests.swift index c569047326..2e44dfc364 100644 --- a/Tests/GRDBTests/AssociationBelongsToSQLTests.swift +++ b/Tests/GRDBTests/AssociationBelongsToSQLTests.swift @@ -330,7 +330,7 @@ class AssociationBelongsToSQLTests: GRDBTestCase { t.column("name", .text) } try db.create(table: "children") { t in - t.column("parentId", .integer).references("parents") + t.belongsTo("parent") } } @@ -546,8 +546,8 @@ class AssociationBelongsToSQLTests: GRDBTestCase { t.column("name", .text) } try db.create(table: "children") { t in - t.column("parent1Id", .integer).references("parents") - t.column("parent2Id", .integer).references("parents") + t.belongsTo("parent1", inTable: "parents") + t.belongsTo("parent2", inTable: "parents") } } @@ -1733,6 +1733,81 @@ class AssociationBelongsToSQLTests: GRDBTestCase { } } + func testTableBelongsToView() throws { + try makeDatabaseQueue().write { db in + try db.execute(sql: """ + CREATE TABLE child (foo); + CREATE VIEW parent AS SELECT 1 AS bar; + """) + + let child = Table("child") + let parent = Table("parent") + let foreignKey = ForeignKey(["foo"], to: ["bar"]) + let association = child.belongsTo(parent, using: foreignKey) + + try assertEqualSQL(db, child.joining(required: association), """ + SELECT "child".* \ + FROM "child" \ + JOIN "parent" 
ON "parent"."bar" = "child"."foo" + """) + try assertEqualSQL(db, child.joining(optional: association), """ + SELECT "child".* \ + FROM "child" \ + LEFT JOIN "parent" ON "parent"."bar" = "child"."foo" + """) + } + } + + func testViewBelongsToTable() throws { + try makeDatabaseQueue().write { db in + try db.execute(sql: """ + CREATE VIEW child AS SELECT 1 AS foo; + CREATE TABLE parent(id INTEGER PRIMARY KEY); + """) + + let child = Table("child") + let parent = Table("parent") + let foreignKey = ForeignKey(["foo"]) + let association = child.belongsTo(parent, using: foreignKey) + + try assertEqualSQL(db, child.joining(required: association), """ + SELECT "child".* \ + FROM "child" \ + JOIN "parent" ON "parent"."id" = "child"."foo" + """) + try assertEqualSQL(db, child.joining(optional: association), """ + SELECT "child".* \ + FROM "child" \ + LEFT JOIN "parent" ON "parent"."id" = "child"."foo" + """) + } + } + + func testViewBelongsToView() throws { + try makeDatabaseQueue().write { db in + try db.execute(sql: """ + CREATE VIEW child AS SELECT 1 AS foo; + CREATE VIEW parent AS SELECT 1 AS bar; + """) + + let child = Table("child") + let parent = Table("parent") + let foreignKey = ForeignKey(["foo"], to: ["bar"]) + let association = child.belongsTo(parent, using: foreignKey) + + try assertEqualSQL(db, child.joining(required: association), """ + SELECT "child".* \ + FROM "child" \ + JOIN "parent" ON "parent"."bar" = "child"."foo" + """) + try assertEqualSQL(db, child.joining(optional: association), """ + SELECT "child".* \ + FROM "child" \ + LEFT JOIN "parent" ON "parent"."bar" = "child"."foo" + """) + } + } + // Regression test for https://github.com/groue/GRDB.swift/issues/495 func testFetchCount() throws { let dbQueue = try makeDatabaseQueue() @@ -1742,7 +1817,7 @@ class AssociationBelongsToSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId", .integer).references("a") + t.belongsTo("a") } struct A: TableRecord { } struct B: TableRecord { } @@ -1771,7 +1846,7 @@ class AssociationBelongsToSQLTests: GRDBTestCase { t.column("name", .text) } try db.create(table: "children") { t in - t.column("parentId", .integer).references("parents") + t.belongsTo("parent") } } diff --git a/Tests/GRDBTests/AssociationHasManyOrderingTests.swift b/Tests/GRDBTests/AssociationHasManyOrderingTests.swift index 5feb757c99..dff328b69f 100644 --- a/Tests/GRDBTests/AssociationHasManyOrderingTests.swift +++ b/Tests/GRDBTests/AssociationHasManyOrderingTests.swift @@ -36,7 +36,7 @@ class AssociationHasManyOrderingTests: GRDBTestCase { } try db.create(table: "player") { t in t.primaryKey("id", .integer) - t.column("teamId", .integer).notNull().references("team") + t.belongsTo("team").notNull() t.column("name", .text).notNull() t.column("position", .integer).notNull() } diff --git a/Tests/GRDBTests/AssociationHasManyRowScopeTests.swift b/Tests/GRDBTests/AssociationHasManyRowScopeTests.swift index a3d244ea97..69f450aa7c 100644 --- a/Tests/GRDBTests/AssociationHasManyRowScopeTests.swift +++ b/Tests/GRDBTests/AssociationHasManyRowScopeTests.swift @@ -18,7 +18,7 @@ class AssociationHasManyRowScopeTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent") + t.belongsTo("parent") } try db.execute(sql: """ INSERT INTO parent (id) VALUES (1); @@ -49,7 +49,7 @@ class AssociationHasManyRowScopeTests: GRDBTestCase { } try db.create(table: "children") { t in t.autoIncrementedPrimaryKey("id") 
- t.column("parentId", .integer).references("parents") + t.belongsTo("parent") } try db.execute(sql: """ INSERT INTO parents (id) VALUES (1); @@ -79,7 +79,7 @@ class AssociationHasManyRowScopeTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent") + t.belongsTo("parent") } try db.execute(sql: """ INSERT INTO parent (id) VALUES (1); diff --git a/Tests/GRDBTests/AssociationHasManySQLTests.swift b/Tests/GRDBTests/AssociationHasManySQLTests.swift index ffc3fcaab4..f56133ab0c 100644 --- a/Tests/GRDBTests/AssociationHasManySQLTests.swift +++ b/Tests/GRDBTests/AssociationHasManySQLTests.swift @@ -217,7 +217,7 @@ class AssociationHasManySQLTests: GRDBTestCase { t.primaryKey("id", .integer) } try db.create(table: "children") { t in - t.column("parentId", .integer).references("parents") + t.belongsTo("parent") } } @@ -346,8 +346,8 @@ class AssociationHasManySQLTests: GRDBTestCase { t.primaryKey("id", .integer) } try db.create(table: "children") { t in - t.column("parent1Id", .integer).references("parents") - t.column("parent2Id", .integer).references("parents") + t.belongsTo("parent1", inTable: "parents") + t.belongsTo("parent2", inTable: "parents") } } @@ -1074,10 +1074,10 @@ class AssociationHasManySQLTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent") + t.belongsTo("parent") } try db.create(table: "toy") { t in - t.column("childId", .integer).references("child") + t.belongsTo("child") } } diff --git a/Tests/GRDBTests/AssociationHasManyThroughOrderingTests.swift b/Tests/GRDBTests/AssociationHasManyThroughOrderingTests.swift index 79be3fa5ed..0b99461925 100644 --- a/Tests/GRDBTests/AssociationHasManyThroughOrderingTests.swift +++ b/Tests/GRDBTests/AssociationHasManyThroughOrderingTests.swift @@ -40,8 +40,8 @@ class AssociationHasManyThroughOrderingTests: GRDBTestCase { t.column("name", .text).notNull() } try db.create(table: "playerRole") { t in - t.column("teamId", .integer).notNull().references("team") - t.column("playerId", .integer).notNull().references("player") + t.belongsTo("team").notNull() + t.belongsTo("player").notNull() t.column("position", .integer).notNull() t.primaryKey(["teamId", "playerId"]) } diff --git a/Tests/GRDBTests/AssociationHasManyThroughRowScopeTests.swift b/Tests/GRDBTests/AssociationHasManyThroughRowScopeTests.swift index 215720a6b6..fbe43d7b5c 100644 --- a/Tests/GRDBTests/AssociationHasManyThroughRowScopeTests.swift +++ b/Tests/GRDBTests/AssociationHasManyThroughRowScopeTests.swift @@ -23,11 +23,11 @@ class AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "parent") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("child") + t.belongsTo("child") } try db.create(table: "grandChild") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("child") + t.belongsTo("child") } try db.execute(sql: """ INSERT INTO child (id) VALUES (1); @@ -64,11 +64,11 @@ class AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "parents") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("children") + t.belongsTo("child") } try db.create(table: "grandChildren") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("children") + t.belongsTo("child") } try db.execute(sql: """ INSERT INTO children (id) VALUES (1); @@ -106,11 +106,11 @@ class 
AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "parents") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("children") + t.belongsTo("child") } try db.create(table: "grandChildren") { t in t.autoIncrementedPrimaryKey("id") - t.column("childId").references("children") + t.belongsTo("child") } try db.execute(sql: """ INSERT INTO children (id) VALUES (1); @@ -157,8 +157,8 @@ class AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId").references("parent") - t.column("grandChildId").references("grandChild") + t.belongsTo("parent") + t.belongsTo("grandChild") } try db.execute(sql: """ INSERT INTO parent (id) VALUES (1); @@ -198,8 +198,8 @@ class AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "children") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId").references("parents") - t.column("grandChildId").references("grandChildren") + t.belongsTo("parent") + t.belongsTo("grandChild") } try db.execute(sql: """ INSERT INTO parents (id) VALUES (1); @@ -237,8 +237,8 @@ class AssociationHasManyThroughRowScopeTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId").references("parent") - t.column("grandChildId").references("grandChild") + t.belongsTo("parent") + t.belongsTo("grandChild") } try db.execute(sql: """ INSERT INTO parent (id) VALUES (1); diff --git a/Tests/GRDBTests/AssociationHasManyThroughSQLTests.swift b/Tests/GRDBTests/AssociationHasManyThroughSQLTests.swift index d054a57c20..816e8beac7 100644 --- a/Tests/GRDBTests/AssociationHasManyThroughSQLTests.swift +++ b/Tests/GRDBTests/AssociationHasManyThroughSQLTests.swift @@ -26,11 +26,11 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasManyWithTwoSteps( @@ -66,11 +66,11 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasManyWithTwoSteps( @@ -109,8 +109,8 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") - t.column("aId").references("a") + t.belongsTo("c") + t.belongsTo("a") } try testHasManyWithTwoSteps( @@ -146,11 +146,11 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasManyWithTwoSteps( @@ -334,13 +334,13 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent") + t.belongsTo("parent") } try db.create(table: "toy") { t in - t.column("childId", .integer).references("child") + t.belongsTo("child") } try db.create(table: "pet") { t in - t.column("childId", 
.integer).references("child") + t.belongsTo("child") } } @@ -462,15 +462,15 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try testHasManyWithThreeSteps( @@ -521,15 +521,15 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasManyWithThreeSteps( @@ -581,12 +581,12 @@ class AssociationHasManyThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") - t.column("dId").references("d") + t.belongsTo("b") + t.belongsTo("d") } try testHasManyWithThreeSteps( diff --git a/Tests/GRDBTests/AssociationHasOneSQLDerivationTests.swift b/Tests/GRDBTests/AssociationHasOneSQLDerivationTests.swift index 9165292b1e..932167d5eb 100644 --- a/Tests/GRDBTests/AssociationHasOneSQLDerivationTests.swift +++ b/Tests/GRDBTests/AssociationHasOneSQLDerivationTests.swift @@ -16,12 +16,12 @@ private struct B : TableRecord { private struct RestrictedB : TableRecord { static let databaseTableName = "b" - static let databaseSelection: [SQLSelectable] = [Column("name")] + static let databaseSelection: [any SQLSelectable] = [Column("name")] } private struct ExtendedB : TableRecord { static let databaseTableName = "b" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] } /// Test SQL generation diff --git a/Tests/GRDBTests/AssociationHasOneSQLTests.swift b/Tests/GRDBTests/AssociationHasOneSQLTests.swift index f61c05772f..6d74207877 100644 --- a/Tests/GRDBTests/AssociationHasOneSQLTests.swift +++ b/Tests/GRDBTests/AssociationHasOneSQLTests.swift @@ -189,7 +189,7 @@ class AssociationHasOneSQLTests: GRDBTestCase { t.primaryKey("id", .integer) } try db.create(table: "children") { t in - t.column("parentId", .integer).references("parents") + t.belongsTo("parent") } } @@ -299,8 +299,8 @@ class AssociationHasOneSQLTests: GRDBTestCase { t.primaryKey("id", .integer) } try db.create(table: "children") { t in - t.column("parent1Id", .integer).references("parents") - t.column("parent2Id", .integer).references("parents") + t.belongsTo("parent1", inTable: "parents") + t.belongsTo("parent2", inTable: "parents") } } diff --git a/Tests/GRDBTests/AssociationHasOneThroughDecodableRecordTests.swift b/Tests/GRDBTests/AssociationHasOneThroughDecodableRecordTests.swift index dccbce7b4e..2a976c2629 100644 --- a/Tests/GRDBTests/AssociationHasOneThroughDecodableRecordTests.swift +++ b/Tests/GRDBTests/AssociationHasOneThroughDecodableRecordTests.swift @@ -49,12 +49,12 @@ class AssociationHasOneThroughDecodableRecordTests: GRDBTestCase { } try db.create(table: "b") { t in 
t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") t.column("name", .text) } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationHasOneThroughFetchableRecordTests.swift b/Tests/GRDBTests/AssociationHasOneThroughFetchableRecordTests.swift index ee45837bd3..d0a49bffff 100644 --- a/Tests/GRDBTests/AssociationHasOneThroughFetchableRecordTests.swift +++ b/Tests/GRDBTests/AssociationHasOneThroughFetchableRecordTests.swift @@ -86,12 +86,12 @@ class AssociationHasOneThroughFetchableRecordTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") t.column("name", .text) } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationHasOneThroughRowScopeTests.swift b/Tests/GRDBTests/AssociationHasOneThroughRowScopeTests.swift index 05981b01fa..f5138711fc 100644 --- a/Tests/GRDBTests/AssociationHasOneThroughRowScopeTests.swift +++ b/Tests/GRDBTests/AssociationHasOneThroughRowScopeTests.swift @@ -35,12 +35,12 @@ class AssociationHasOneThroughRowscopeTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") t.column("name", .text) } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") t.column("name", .text) } diff --git a/Tests/GRDBTests/AssociationHasOneThroughSQLDerivationTests.swift b/Tests/GRDBTests/AssociationHasOneThroughSQLDerivationTests.swift index a9f4d9f076..ce9a335e02 100644 --- a/Tests/GRDBTests/AssociationHasOneThroughSQLDerivationTests.swift +++ b/Tests/GRDBTests/AssociationHasOneThroughSQLDerivationTests.swift @@ -19,12 +19,12 @@ private struct C: TableRecord { private struct RestrictedC : TableRecord { static let databaseTableName = "c" - static let databaseSelection: [SQLSelectable] = [Column("name")] + static let databaseSelection: [any SQLSelectable] = [Column("name")] } private struct ExtendedC : TableRecord { static let databaseTableName = "c" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] } /// Test SQL generation @@ -38,11 +38,11 @@ class AssociationHasOneThroughSQLDerivationTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } } } diff --git a/Tests/GRDBTests/AssociationHasOneThroughSQLTests.swift b/Tests/GRDBTests/AssociationHasOneThroughSQLTests.swift index 368aadd967..a9521cc5cf 100644 --- a/Tests/GRDBTests/AssociationHasOneThroughSQLTests.swift +++ b/Tests/GRDBTests/AssociationHasOneThroughSQLTests.swift @@ -28,11 +28,11 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithTwoSteps( @@ -68,11 +68,11 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try 
db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithTwoSteps( @@ -111,8 +111,8 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") - t.column("aId").references("a") + t.belongsTo("c") + t.belongsTo("a") } try testHasOneWithTwoSteps( @@ -148,11 +148,11 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithTwoSteps( @@ -437,15 +437,15 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("dId").references("d") + t.belongsTo("d") } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithThreeSteps( @@ -494,15 +494,15 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithThreeSteps( @@ -554,12 +554,12 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") - t.column("dId").references("d") + t.belongsTo("b") + t.belongsTo("d") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithThreeSteps( @@ -608,15 +608,15 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try testHasOneWithThreeSteps( @@ -668,12 +668,12 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("dId").references("d") + t.belongsTo("d") } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") - t.column("cId").references("c") + t.belongsTo("a") + t.belongsTo("c") } try testHasOneWithThreeSteps( @@ -725,12 +725,12 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") - t.column("cId").references("c") + t.belongsTo("a") + t.belongsTo("c") } try testHasOneWithThreeSteps( @@ -782,12 
+782,12 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") - t.column("dId").references("d") + t.belongsTo("b") + t.belongsTo("d") } try testHasOneWithThreeSteps( @@ -836,15 +836,15 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("aId").references("a") + t.belongsTo("a") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try testHasOneWithThreeSteps( @@ -1724,19 +1724,19 @@ class AssociationHasOneThroughSQLTests: GRDBTestCase { } try db.create(table: "d") { t in t.autoIncrementedPrimaryKey("id") - t.column("eId").references("e") + t.belongsTo("e") } try db.create(table: "c") { t in t.autoIncrementedPrimaryKey("id") - t.column("dId").references("d") + t.belongsTo("d") } try db.create(table: "b") { t in t.autoIncrementedPrimaryKey("id") - t.column("cId").references("c") + t.belongsTo("c") } try db.create(table: "a") { t in t.autoIncrementedPrimaryKey("id") - t.column("bId").references("b") + t.belongsTo("b") } let associations = [ diff --git a/Tests/GRDBTests/AssociationPrefetchingCodableRecordTests.swift b/Tests/GRDBTests/AssociationPrefetchingCodableRecordTests.swift index f981209bae..7877a5e2dd 100644 --- a/Tests/GRDBTests/AssociationPrefetchingCodableRecordTests.swift +++ b/Tests/GRDBTests/AssociationPrefetchingCodableRecordTests.swift @@ -1350,9 +1350,7 @@ class AssociationPrefetchingCodableRecordTests: GRDBTestCase { try dbQueue.write { db in try db.create(table: "employee") { t in t.autoIncrementedPrimaryKey("id") - t.column("managerId", .integer) - .indexed() - .references("employee", onDelete: .restrict) + t.belongsTo("manager", inTable: "employee", onDelete: .restrict) t.column("name", .text) } try db.execute(sql: """ diff --git a/Tests/GRDBTests/AssociationPrefetchingRelationTests.swift b/Tests/GRDBTests/AssociationPrefetchingRelationTests.swift index cd12610f27..f67811c9fe 100644 --- a/Tests/GRDBTests/AssociationPrefetchingRelationTests.swift +++ b/Tests/GRDBTests/AssociationPrefetchingRelationTests.swift @@ -690,4 +690,47 @@ class AssociationPrefetchingRelationTests: GRDBTestCase { } } } + + // Regression test for + func testIssue1315() throws { + struct A: TableRecord { + static let b = belongsTo(B.self) + } + struct B: TableRecord { + static let c = hasOne(C.self) + static let e = hasOne(E.self) + } + struct C: TableRecord { + static let ds = hasMany(D.self) + } + struct D: TableRecord { } + struct E: TableRecord { + static let fs = hasMany(F.self) + } + struct F: TableRecord { } + + try DatabaseQueue().write { db in + try db.execute(sql: """ + CREATE TABLE b (id INTEGER PRIMARY KEY); + CREATE TABLE a (id INTEGER PRIMARY KEY, bId INTEGER REFERENCES b(id)); + CREATE TABLE c (id INTEGER PRIMARY KEY, bId INTEGER REFERENCES b(id)); + CREATE TABLE d (id INTEGER PRIMARY KEY, cId INTEGER REFERENCES c(id)); + CREATE TABLE e (id INTEGER PRIMARY KEY, bId INTEGER REFERENCES b(id)); + CREATE TABLE f (id INTEGER PRIMARY KEY, eId INTEGER REFERENCES e(id)); + """) + + let region = try A + .including(required: A.b + .including(optional: B.c.including(all: C.ds)) + .including(optional: 
B.e.including(all: E.fs))) + .databaseRegion(db) + + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "a"))) + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "b"))) + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "c"))) + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "d"))) + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "e"))) + XCTAssertTrue(region.isModified(byEventsOfKind: .insert(tableName: "f"))) + } + } } diff --git a/Tests/GRDBTests/ColumnExpressionTests.swift b/Tests/GRDBTests/ColumnExpressionTests.swift index 0cc77811b3..b716a07002 100644 --- a/Tests/GRDBTests/ColumnExpressionTests.swift +++ b/Tests/GRDBTests/ColumnExpressionTests.swift @@ -20,7 +20,7 @@ class ColumnExpressionTests: GRDBTestCase { } // Test databaseSelection - static let databaseSelection: [SQLSelectable] = [Columns.id, Columns.name, Columns.score] + static let databaseSelection: [any SQLSelectable] = [Columns.id, Columns.name, Columns.score] init(row: Row) { // Test row subscript @@ -80,7 +80,7 @@ class ColumnExpressionTests: GRDBTestCase { } // Test databaseSelection - static let databaseSelection: [SQLSelectable] = [Columns.id, Columns.name, Columns.score] + static let databaseSelection: [any SQLSelectable] = [Columns.id, Columns.name, Columns.score] init(row: Row) { // Test row subscript @@ -148,7 +148,7 @@ class ColumnExpressionTests: GRDBTestCase { } // Test databaseSelection - static let databaseSelection: [SQLSelectable] = [Columns.id, Columns.name, Columns.score] + static let databaseSelection: [any SQLSelectable] = [Columns.id, Columns.name, Columns.score] static var testRequest: QueryInterfaceRequest { // Test expression derivation @@ -196,7 +196,7 @@ class ColumnExpressionTests: GRDBTestCase { } // Test databaseSelection - static let databaseSelection: [SQLSelectable] = [CodingKeys.id, CodingKeys.name, CodingKeys.score] + static let databaseSelection: [any SQLSelectable] = [CodingKeys.id, CodingKeys.name, CodingKeys.score] static var testRequest: QueryInterfaceRequest { // Test expression derivation diff --git a/Tests/GRDBTests/ColumnInfoTests.swift b/Tests/GRDBTests/ColumnInfoTests.swift index 87b759d7cb..3951b81a1e 100644 --- a/Tests/GRDBTests/ColumnInfoTests.swift +++ b/Tests/GRDBTests/ColumnInfoTests.swift @@ -19,77 +19,96 @@ class ColumnInfoTests: GRDBTestCase { i DATETIME DEFAULT CURRENT_TIMESTAMP, j DATE DEFAULT (DATETIME('now', 'localtime')), "" fooéı👨👨🏿🇫🇷🇨🇮, + untyped, PRIMARY KEY(c, a) ) """) let columns = try db.columns(in: "t") - XCTAssertEqual(columns.count, 11) + XCTAssertEqual(columns.count, 12) XCTAssertEqual(columns[0].name, "a") XCTAssertEqual(columns[0].isNotNull, false) XCTAssertEqual(columns[0].type, "INT") + XCTAssertEqual(columns[0].columnType?.rawValue, "INT") XCTAssertEqual(columns[0].primaryKeyIndex, 2) XCTAssertNil(columns[0].defaultValueSQL) XCTAssertEqual(columns[1].name, "b") XCTAssertEqual(columns[1].isNotNull, false) XCTAssertEqual(columns[1].type, "TEXT") + XCTAssertEqual(columns[1].columnType?.rawValue, "TEXT") XCTAssertEqual(columns[1].primaryKeyIndex, 0) XCTAssertNil(columns[1].defaultValueSQL) XCTAssertEqual(columns[2].name, "c") XCTAssertEqual(columns[2].isNotNull, false) XCTAssertEqual(columns[2].type, "VARCHAR(10)") + XCTAssertEqual(columns[2].columnType?.rawValue, "VARCHAR(10)") XCTAssertEqual(columns[2].primaryKeyIndex, 1) XCTAssertNil(columns[2].defaultValueSQL) XCTAssertEqual(columns[3].name, "d") XCTAssertEqual(columns[3].isNotNull, false) 
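The new `columnType` assertions above sit next to the long-standing `type` string; judging by these tests, `columnType` wraps the declared type (its `rawValue` mirrors `type`) and is nil when a column is declared without any type. A sketch of that reading (the table and its columns are illustrative):

    import GRDB

    try DatabaseQueue().write { db in
        try db.execute(sql: "CREATE TABLE t (a INT, untyped)")
        let columns = try db.columns(in: "t")
        assert(columns[0].type == "INT")                  // raw declared type
        assert(columns[0].columnType?.rawValue == "INT")  // typed counterpart
        assert(columns[1].type == "")                     // untyped column...
        assert(columns[1].columnType == nil)              // ...has no columnType
    }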
XCTAssertEqual(columns[3].type.uppercased(), "INT") // "int" or "INT" depending of SQLite version + XCTAssertEqual(columns[3].columnType?.rawValue.uppercased(), "INT") // "int" or "INT" depending of SQLite version XCTAssertEqual(columns[3].primaryKeyIndex, 0) XCTAssertEqual(columns[3].defaultValueSQL, "NULL") XCTAssertEqual(columns[4].name, "e") XCTAssertEqual(columns[4].isNotNull, true) XCTAssertEqual(columns[4].type.uppercased(), "TEXT") // "Text" or "TEXT" depending of SQLite version + XCTAssertEqual(columns[4].columnType?.rawValue.uppercased(), "TEXT") // "Text" or "TEXT" depending of SQLite version XCTAssertEqual(columns[4].primaryKeyIndex, 0) XCTAssertEqual(columns[4].defaultValueSQL, "'foo'") XCTAssertEqual(columns[5].name, "fooéı👨👨🏿🇫🇷🇨🇮") XCTAssertEqual(columns[5].isNotNull, false) XCTAssertEqual(columns[5].type, "INT") + XCTAssertEqual(columns[5].columnType?.rawValue, "INT") XCTAssertEqual(columns[5].primaryKeyIndex, 0) XCTAssertEqual(columns[5].defaultValueSQL, "0") XCTAssertEqual(columns[6].name, "g") XCTAssertEqual(columns[6].isNotNull, false) XCTAssertEqual(columns[6].type, "INT") + XCTAssertEqual(columns[6].columnType?.rawValue, "INT") XCTAssertEqual(columns[6].primaryKeyIndex, 0) XCTAssertEqual(columns[6].defaultValueSQL, "1e6") XCTAssertEqual(columns[7].name, "h") XCTAssertEqual(columns[7].isNotNull, false) XCTAssertEqual(columns[7].type, "REAL") + XCTAssertEqual(columns[7].columnType?.rawValue, "REAL") XCTAssertEqual(columns[7].primaryKeyIndex, 0) XCTAssertEqual(columns[7].defaultValueSQL, "1.0") XCTAssertEqual(columns[8].name, "i") XCTAssertEqual(columns[8].isNotNull, false) XCTAssertEqual(columns[8].type, "DATETIME") + XCTAssertEqual(columns[8].columnType?.rawValue, "DATETIME") XCTAssertEqual(columns[8].primaryKeyIndex, 0) XCTAssertEqual(columns[8].defaultValueSQL, "CURRENT_TIMESTAMP") XCTAssertEqual(columns[9].name, "j") XCTAssertEqual(columns[9].isNotNull, false) XCTAssertEqual(columns[9].type, "DATE") + XCTAssertEqual(columns[9].columnType?.rawValue, "DATE") XCTAssertEqual(columns[9].primaryKeyIndex, 0) XCTAssertEqual(columns[9].defaultValueSQL, "DATETIME('now', 'localtime')") XCTAssertEqual(columns[10].name, "") XCTAssertEqual(columns[10].isNotNull, false) XCTAssertEqual(columns[10].type, "fooéı👨👨🏿🇫🇷🇨🇮") + XCTAssertEqual(columns[10].columnType?.rawValue, "fooéı👨👨🏿🇫🇷🇨🇮") XCTAssertEqual(columns[10].primaryKeyIndex, 0) XCTAssertNil(columns[10].defaultValueSQL) + + XCTAssertEqual(columns[11].name, "untyped") + XCTAssertEqual(columns[11].isNotNull, false) + XCTAssertEqual(columns[11].type, "") + XCTAssertNil(columns[11].columnType) + XCTAssertEqual(columns[11].primaryKeyIndex, 0) + XCTAssertNil(columns[11].defaultValueSQL) } } @@ -270,4 +289,106 @@ class ColumnInfoTests: GRDBTestCase { _ = try db.columns(in: "t1") } } + + func testUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id INTEGER)") + do { + _ = try db.columns(in: "t", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id INTEGER)") + let columns = try db.columns(in: "t") + XCTAssertEqual(columns.count, 1) + XCTAssertEqual(columns[0].name, "id") 
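The schema tests above exercise a schema-qualified variant of `columns(in:)`: per the diff, `db.columns(in: "t", in: "main")` targets a schema explicitly, and an unknown schema name surfaces as `SQLITE_ERROR` ("no such schema"). A sketch (names are illustrative):

    import GRDB

    try DatabaseQueue().inDatabase { db in
        try db.execute(sql: "CREATE TABLE t (id INTEGER)")

        // Address the main schema explicitly:
        let columns = try db.columns(in: "t", in: "main")
        assert(columns[0].name == "id")

        // Unknown schema names are reported as SQLITE_ERROR:
        do {
            _ = try db.columns(in: "t", in: "nope")
        } catch let error as DatabaseError {
            assert(error.resultCode == .SQLITE_ERROR)
        }
    }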
+ XCTAssertEqual(columns[0].type, "INTEGER") + } + } + + func testSpecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id2 TEXT)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id1 INTEGER)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let columnsMain = try db.columns(in: "t", in: "main") + XCTAssertEqual(columnsMain.count, 1) + XCTAssertEqual(columnsMain[0].name, "id1") + XCTAssertEqual(columnsMain[0].type, "INTEGER") + + let columnsAttached = try db.columns(in: "t", in: "attached") + XCTAssertEqual(columnsAttached.count, 1) + XCTAssertEqual(columnsAttached[0].name, "id2") + XCTAssertEqual(columnsAttached[0].type, "TEXT") + } + } + + // The `t` table in the attached database should never + // be found unless explicitly specified as it is after + // `main.t` in resolution order. + func testUnspecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id2 TEXT)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id1 INTEGER)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let columnsMain = try db.columns(in: "t") + XCTAssertEqual(columnsMain.count, 1) + XCTAssertEqual(columnsMain[0].name, "id1") + XCTAssertEqual(columnsMain[0].type, "INTEGER") + } + } + + func testUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE t (id2 TEXT)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let columnsMain = try db.columns(in: "t") + XCTAssertEqual(columnsMain.count, 1) + XCTAssertEqual(columnsMain[0].name, "id2") + XCTAssertEqual(columnsMain[0].type, "TEXT") + } + } } diff --git a/Tests/GRDBTests/CommonTableExpressionTests.swift b/Tests/GRDBTests/CommonTableExpressionTests.swift index c12a52ebe3..448b06add6 100644 --- a/Tests/GRDBTests/CommonTableExpressionTests.swift +++ b/Tests/GRDBTests/CommonTableExpressionTests.swift @@ -631,11 +631,11 @@ class CommonTableExpressionTests: GRDBTestCase { } try db.create(table: "player") { t in t.autoIncrementedPrimaryKey("id") - t.column("teamId", .integer).references("team") + t.belongsTo("team") } try db.create(table: "award") { t in t.autoIncrementedPrimaryKey("id") - t.column("playerId", .integer).references("player") + t.belongsTo("player") } struct Team: TableRecord { } struct Player: 
TableRecord { diff --git a/Tests/GRDBTests/CompilationProtocolTests.swift b/Tests/GRDBTests/CompilationProtocolTests.swift index 589627683d..183fd89edb 100644 --- a/Tests/GRDBTests/CompilationProtocolTests.swift +++ b/Tests/GRDBTests/CompilationProtocolTests.swift @@ -27,13 +27,13 @@ private struct UserDatabaseAggregate1 : DatabaseAggregate { let a: Int? init() { a = nil } mutating func step(_ dbValues: [DatabaseValue]) throws { } - func finalize() throws -> DatabaseValueConvertible? { preconditionFailure() } + func finalize() throws -> (any DatabaseValueConvertible)? { preconditionFailure() } } private class UserDatabaseAggregate2 : DatabaseAggregate { required init() { } func step(_ dbValues: [DatabaseValue]) throws { } - func finalize() throws -> DatabaseValueConvertible? { preconditionFailure() } + func finalize() throws -> (any DatabaseValueConvertible)? { preconditionFailure() } } // MARK: - DatabaseValueConvertible @@ -52,7 +52,7 @@ private class UserDatabaseValueConvertible2 : DatabaseValueConvertible { #if SQLITE_ENABLE_FTS5 private class UserFTS5Tokenizer : FTS5Tokenizer { - func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt { preconditionFailure() } + func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt { preconditionFailure() } } #endif @@ -62,7 +62,7 @@ private class UserFTS5Tokenizer : FTS5Tokenizer { private class UserFTS5CustomTokenizer : FTS5CustomTokenizer { static let name: String = "UserFTS5CustomTokenizer" required init(db: Database, arguments: [String]) throws { preconditionFailure() } - func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt { preconditionFailure() } + func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: CInt, tokenCallback: @escaping FTS5TokenCallback) -> CInt { preconditionFailure() } } #endif @@ -71,7 +71,7 @@ private class UserFTS5CustomTokenizer : FTS5CustomTokenizer { #if SQLITE_ENABLE_FTS5 private class UserFTS5WrapperTokenizer : FTS5WrapperTokenizer { static let name: String = "UserFTS5WrapperTokenizer" - var wrappedTokenizer: FTS5Tokenizer { preconditionFailure() } + var wrappedTokenizer: any FTS5Tokenizer { preconditionFailure() } required init(db: Database, arguments: [String]) throws { preconditionFailure() } func accept(token: String, flags: FTS5TokenFlags, for tokenization: FTS5Tokenization, tokenCallback: (String, FTS5TokenFlags) throws -> ()) throws { preconditionFailure() } } diff --git a/Tests/GRDBTests/DataMemoryTests.swift b/Tests/GRDBTests/DataMemoryTests.swift index 83c265e8fa..a6f3292983 100644 --- a/Tests/GRDBTests/DataMemoryTests.swift +++ b/Tests/GRDBTests/DataMemoryTests.swift @@ -34,15 +34,6 @@ class DataMemoryTests: GRDBTestCase { } } } - - do { - // This data should not be copied - let nonCopiedData = row.dataNoCopy(atIndex: 0)! - XCTAssertEqual(nonCopiedData, data) - nonCopiedData.withUnsafeBytes { - XCTAssertEqual($0.baseAddress, blobPointer) - } - } } } @@ -71,15 +62,6 @@ class DataMemoryTests: GRDBTestCase { } } } - - do { - // This data should not be copied - let nonCopiedData = nestedRow.dataNoCopy(atIndex: 0)! 
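// A minimal sketch, not part of this patch, of the Swift 5.7 spellings the
// diff adopts throughout the suite: existential protocol values are written
// `any P`, and generic parameters in argument position can be written
// `some P`. The function names here are hypothetical.
import GRDB

// `any`: the concrete DatabaseValueConvertible type is erased at runtime.
func defaultScore() -> (any DatabaseValueConvertible)? { Int64(0) }

// `some`: shorthand for `func objectCount<R: DatabaseReader>(_ reader: R)`.
func objectCount(_ reader: some DatabaseReader) throws -> Int {
    try reader.read { db in
        try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM sqlite_master") ?? 0
    }
}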
- XCTAssertEqual(nonCopiedData, data) - nonCopiedData.withUnsafeBytes { - XCTAssertEqual($0.baseAddress, blobPointer) - } - } } } @@ -107,13 +89,62 @@ class DataMemoryTests: GRDBTestCase { } } } - do { - // This data should not be copied: - let nonCopiedData = row.dataNoCopy(atIndex: 0)! - XCTAssertEqual(nonCopiedData, data) - nonCopiedData.withUnsafeBytes { nonCopiedBuffer in - XCTAssertEqual(nonCopiedBuffer.baseAddress, buffer.baseAddress) - } + } + default: + XCTFail("Not a blob") + } + } + } + } + + @available(*, deprecated) + func testDeprecatedMemoryBehavior() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + // Make sure Data is on the heap (15 bytes is enough) + // For more context, see: + // https://forums.swift.org/t/swift-5-how-to-test-data-bytesnocopydeallocator/20299/2?u=gwendal.roue + let data = Data(repeating: 0xaa, count: 15) + + do { + let rows = try Row.fetchCursor(db, sql: "SELECT ?", arguments: [data]) + while let row = try rows.next() { + let blobPointer = sqlite3_column_blob(row.sqliteStatement, 0) + // This data should not be copied + let nonCopiedData = row.dataNoCopy(atIndex: 0)! + XCTAssertEqual(nonCopiedData, data) + nonCopiedData.withUnsafeBytes { + XCTAssertEqual($0.baseAddress, blobPointer) + } + } + } + + do { + let adapter = ScopeAdapter(["nested": SuffixRowAdapter(fromIndex: 0)]) + let rows = try Row.fetchCursor(db, sql: "SELECT ?", arguments: [data], adapter: adapter) + while let row = try rows.next() { + let blobPointer = sqlite3_column_blob(row.unadapted.sqliteStatement, 0) + let nestedRow = row.scopes["nested"]! + // This data should not be copied + let nonCopiedData = nestedRow.dataNoCopy(atIndex: 0)! + XCTAssertEqual(nonCopiedData, data) + nonCopiedData.withUnsafeBytes { + XCTAssertEqual($0.baseAddress, blobPointer) + } + } + } + + do { + let row = try Row.fetchOne(db, sql: "SELECT ?", arguments: [data])! + let dbValue = row.first!.1 // TODO: think about exposing a (column:,databaseValue:) tuple + switch dbValue.storage { + case .blob(let data): + data.withUnsafeBytes { buffer in + // This data should not be copied: + let nonCopiedData = row.dataNoCopy(atIndex: 0)! 
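// An illustrative sketch, not part of this patch, of the pointer-identity
// check that these `dataNoCopy` assertions perform before being consolidated
// into the deprecated test below: the returned Data is expected to alias
// SQLite's own blob buffer rather than copy it. Assumes `sqlite3_column_blob`
// is visible through GRDB's SQLite exports, as in these tests.
import Foundation
import GRDB

func verifyBlobIsNotCopied(_ db: Database) throws {
    // 15+ bytes, so the Data payload lives on the heap.
    let data = Data(repeating: 0xaa, count: 15)
    let rows = try Row.fetchCursor(db, sql: "SELECT ?", arguments: [data])
    while let row = try rows.next() {
        let blobPointer = sqlite3_column_blob(row.sqliteStatement, 0)
        let nonCopied = row.dataNoCopy(atIndex: 0)! // deprecated API under test
        nonCopied.withUnsafeBytes { buffer in
            // Same base address: the bytes were not copied.
            assert(buffer.baseAddress == blobPointer)
        }
    }
}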
+ XCTAssertEqual(nonCopiedData, data) + nonCopiedData.withUnsafeBytes { nonCopiedBuffer in + XCTAssertEqual(nonCopiedBuffer.baseAddress, buffer.baseAddress) } } default: diff --git a/Tests/GRDBTests/DatabaseAbortedTransactionTests.swift b/Tests/GRDBTests/DatabaseAbortedTransactionTests.swift index 602e8705cb..d6229e88d8 100644 --- a/Tests/GRDBTests/DatabaseAbortedTransactionTests.swift +++ b/Tests/GRDBTests/DatabaseAbortedTransactionTests.swift @@ -4,7 +4,7 @@ import GRDB class DatabaseAbortedTransactionTests : GRDBTestCase { func testReadTransactionAbortedByInterrupt() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let semaphore1 = DispatchSemaphore(value: 0) let semaphore2 = DispatchSemaphore(value: 0) @@ -46,7 +46,7 @@ class DatabaseAbortedTransactionTests : GRDBTestCase { } func testReadTransactionAbortedByInterruptDoesNotPreventFurtherRead() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let semaphore1 = DispatchSemaphore(value: 0) let semaphore2 = DispatchSemaphore(value: 0) @@ -316,7 +316,7 @@ class DatabaseAbortedTransactionTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { do { try dbReader.unsafeRead { db in try db.inTransaction { diff --git a/Tests/GRDBTests/DatabaseAggregateTests.swift b/Tests/GRDBTests/DatabaseAggregateTests.swift index e07a827dbe..3a881b90ed 100644 --- a/Tests/GRDBTests/DatabaseAggregateTests.swift +++ b/Tests/GRDBTests/DatabaseAggregateTests.swift @@ -18,7 +18,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateReturningNull() throws { struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { nil } + func finalize() -> (any DatabaseValueConvertible)? { nil } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -31,7 +31,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateReturningInt64() throws { struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { Int64(1) } + func finalize() -> (any DatabaseValueConvertible)? { Int64(1) } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -45,7 +45,7 @@ class DatabaseAggregateTests: GRDBTestCase { let dbQueue = try makeDatabaseQueue() struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { 1e100 } + func finalize() -> (any DatabaseValueConvertible)? { 1e100 } } try dbQueue.inDatabase { db in let fn = DatabaseFunction("f", argumentCount: 0, aggregate: Aggregate.self) @@ -57,7 +57,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateReturningString() throws { struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { "foo" } + func finalize() -> (any DatabaseValueConvertible)? { "foo" } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -70,7 +70,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateReturningData() throws { struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { + func finalize() -> (any DatabaseValueConvertible)? 
{ "foo".data(using: .utf8) } } @@ -85,7 +85,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateReturningCustomValueType() throws { struct Aggregate : DatabaseAggregate { func step(_ values: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { CustomValueType() } + func finalize() -> (any DatabaseValueConvertible)? { CustomValueType() } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -99,11 +99,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentNil() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = dbValues[0].isNull } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -120,11 +120,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentInt64() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = Int64.fromDatabaseValue(dbValues[0]) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -138,11 +138,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentDouble() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = Double.fromDatabaseValue(dbValues[0]) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -156,11 +156,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentString() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = String.fromDatabaseValue(dbValues[0]) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -173,11 +173,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentBlob() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = Data.fromDatabaseValue(dbValues[0]) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -191,11 +191,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateArgumentCustomValueType() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = CustomValueType.fromDatabaseValue(dbValues[0]) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? 
{ result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -211,7 +211,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateWithoutArgument() throws { struct Aggregate : DatabaseAggregate { func step(_ dbValues: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { "foo" } + func finalize() -> (any DatabaseValueConvertible)? { "foo" } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -232,11 +232,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateOfOneArgument() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = String.fromDatabaseValue(dbValues[0])?.uppercased() } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -259,12 +259,12 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateOfTwoArguments() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { let ints = dbValues.compactMap { Int.fromDatabaseValue($0) } result = ints.reduce(0, +) } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -285,11 +285,11 @@ class DatabaseAggregateTests: GRDBTestCase { func testVariadicFunction() throws { struct Aggregate : DatabaseAggregate { - var result: DatabaseValueConvertible? + var result: (any DatabaseValueConvertible)? mutating func step(_ dbValues: [DatabaseValue]) { result = dbValues.count } - func finalize() -> DatabaseValueConvertible? { result } + func finalize() -> (any DatabaseValueConvertible)? { result } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -308,7 +308,7 @@ class DatabaseAggregateTests: GRDBTestCase { func step(_ dbValues: [DatabaseValue]) throws { throw DatabaseError(message: "custom error message") } - func finalize() -> DatabaseValueConvertible? { fatalError() } + func finalize() -> (any DatabaseValueConvertible)? { fatalError() } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -329,7 +329,7 @@ class DatabaseAggregateTests: GRDBTestCase { func step(_ dbValues: [DatabaseValue]) throws { throw DatabaseError(resultCode: ResultCode(rawValue: 123)) } - func finalize() -> DatabaseValueConvertible? { fatalError() } + func finalize() -> (any DatabaseValueConvertible)? { fatalError() } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -350,7 +350,7 @@ class DatabaseAggregateTests: GRDBTestCase { func step(_ dbValues: [DatabaseValue]) throws { throw DatabaseError(resultCode: ResultCode(rawValue: 123), message: "custom error message") } - func finalize() -> DatabaseValueConvertible? { fatalError() } + func finalize() -> (any DatabaseValueConvertible)? { fatalError() } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -371,7 +371,7 @@ class DatabaseAggregateTests: GRDBTestCase { func step(_ dbValues: [DatabaseValue]) throws { throw NSError(domain: "CustomErrorDomain", code: 123, userInfo: [NSLocalizedDescriptionKey: "custom error message"]) } - func finalize() -> DatabaseValueConvertible? 
{ fatalError() } + func finalize() -> (any DatabaseValueConvertible)? { fatalError() } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -394,7 +394,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateResultThrowingDatabaseErrorWithMessage() throws { struct Aggregate : DatabaseAggregate { func step(_ dbValues: [DatabaseValue]) { } - func finalize() throws -> DatabaseValueConvertible? { + func finalize() throws -> (any DatabaseValueConvertible)? { throw DatabaseError(message: "custom error message") } } @@ -415,7 +415,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateResultThrowingDatabaseErrorWithCode() throws { struct Aggregate : DatabaseAggregate { func step(_ dbValues: [DatabaseValue]) { } - func finalize() throws -> DatabaseValueConvertible? { + func finalize() throws -> (any DatabaseValueConvertible)? { throw DatabaseError(resultCode: ResultCode(rawValue: 123)) } } @@ -436,7 +436,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateResultThrowingDatabaseErrorWithMessageAndCode() throws { struct Aggregate : DatabaseAggregate { func step(_ dbValues: [DatabaseValue]) { } - func finalize() throws -> DatabaseValueConvertible? { + func finalize() throws -> (any DatabaseValueConvertible)? { throw DatabaseError(resultCode: ResultCode(rawValue: 123), message: "custom error message") } } @@ -457,7 +457,7 @@ class DatabaseAggregateTests: GRDBTestCase { func testAggregateResultThrowingCustomError() throws { struct Aggregate : DatabaseAggregate { func step(_ dbValues: [DatabaseValue]) { } - func finalize() throws -> DatabaseValueConvertible? { + func finalize() throws -> (any DatabaseValueConvertible)? { throw NSError(domain: "CustomErrorDomain", code: 123, userInfo: [NSLocalizedDescriptionKey: "custom error message"]) } } @@ -487,7 +487,7 @@ class DatabaseAggregateTests: GRDBTestCase { sum = (sum ?? 0) + int } } - func finalize() throws -> DatabaseValueConvertible? { sum } + func finalize() throws -> (any DatabaseValueConvertible)? { sum } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -505,7 +505,7 @@ class DatabaseAggregateTests: GRDBTestCase { sum = (sum ?? 0) + int } } - func finalize() throws -> DatabaseValueConvertible? { sum } + func finalize() throws -> (any DatabaseValueConvertible)? { sum } } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -526,7 +526,7 @@ class DatabaseAggregateTests: GRDBTestCase { init() { Aggregate.onInit?() } deinit { Aggregate.onDeinit?() } func step(_ dbValues: [DatabaseValue]) { } - func finalize() -> DatabaseValueConvertible? { nil } + func finalize() -> (any DatabaseValueConvertible)? { nil } } var allocationCount = 0 var aliveCount = 0 @@ -559,7 +559,7 @@ class DatabaseAggregateTests: GRDBTestCase { func step(_ dbValues: [DatabaseValue]) throws { throw DatabaseError(message: "boo") } - func finalize() -> DatabaseValueConvertible? { fatalError() } + func finalize() -> (any DatabaseValueConvertible)? { fatalError() } } var allocationCount = 0 var aliveCount = 0 @@ -590,7 +590,7 @@ class DatabaseAggregateTests: GRDBTestCase { init() { Aggregate.onInit?() } deinit { Aggregate.onDeinit?() } func step(_ dbValues: [DatabaseValue]) { } - func finalize() throws -> DatabaseValueConvertible? { + func finalize() throws -> (any DatabaseValueConvertible)? 
{ throw DatabaseError(message: "boo") } } diff --git a/Tests/GRDBTests/DatabaseConfigurationTests.swift b/Tests/GRDBTests/DatabaseConfigurationTests.swift index b0d4d2714f..7ff0f47029 100644 --- a/Tests/GRDBTests/DatabaseConfigurationTests.swift +++ b/Tests/GRDBTests/DatabaseConfigurationTests.swift @@ -39,7 +39,7 @@ class DatabaseConfigurationTests: GRDBTestCase { var configuration = Configuration() configuration.prepareDatabase { db in - if let error = error { + if let error { throw error } } @@ -210,101 +210,104 @@ class DatabaseConfigurationTests: GRDBTestCase { _ = group.wait(timeout: .distantFuture) } - // TODO: fix flaky test -// func testBusyModeTimeoutTooShort() throws { -// let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") -// #if GRDBCIPHER_USE_ENCRYPTION -// // Work around SQLCipher bug when two connections are open to the -// // same empty database: make sure the database is not empty before -// // running this test -// try dbQueue1.inDatabase { db in -// try db.execute(sql: "CREATE TABLE SQLCipherWorkAround (foo INTEGER)") -// } -// #endif -// -// var configuration2 = dbQueue1.configuration -// configuration2.busyMode = .timeout(0.1) -// let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite", configuration: configuration2) -// -// let s1 = DispatchSemaphore(value: 0) -// let s2 = DispatchSemaphore(value: 0) -// let queue = DispatchQueue.global(priority: .default) -// let group = DispatchGroup() -// -// queue.async(group: group) { -// do { -// try dbQueue1.inTransaction(.exclusive) { db in -// s2.signal() -// queue.asyncAfter(deadline: .now() + 1) { -// s1.signal() -// } -// _ = s1.wait(timeout: .distantFuture) -// return .commit -// } -// } catch { -// XCTFail("\(error)") -// } -// } -// -// queue.async(group: group) { -// do { -// _ = s2.wait(timeout: .distantFuture) -// try dbQueue2.inTransaction(.exclusive) { db in return .commit } -// XCTFail("Expected error") -// } catch DatabaseError.SQLITE_BUSY { -// } catch { -// XCTFail("\(error)") -// } -// } -// -// _ = group.wait(timeout: .distantFuture) -// } + func testBusyModeTimeoutTooShort() throws { + let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") + #if GRDBCIPHER_USE_ENCRYPTION + // Work around SQLCipher bug when two connections are open to the + // same empty database: make sure the database is not empty before + // running this test + try dbQueue1.inDatabase { db in + try db.execute(sql: "CREATE TABLE SQLCipherWorkAround (foo INTEGER)") + } + #endif + + var configuration2 = dbQueue1.configuration + configuration2.busyMode = .timeout(0.1) + let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite", configuration: configuration2) + + let s1 = DispatchSemaphore(value: 0) + let s2 = DispatchSemaphore(value: 0) + let queue = DispatchQueue.global(qos: .default) + let group = DispatchGroup() + + queue.async(group: group) { + do { + try dbQueue1.inTransaction(.exclusive) { db in + // Let dbQueue2 attempt to open an exclusive transaction + s2.signal() + // Wait for dbQueue2 to fail 😈 + _ = s1.wait(timeout: .distantFuture) + return .commit + } + } catch { + XCTFail("\(error)") + } + } + + queue.async(group: group) { + do { + // Wait for dbQueue1 to start an exclusive transaction + _ = s2.wait(timeout: .distantFuture) + try dbQueue2.inTransaction(.exclusive) { db in return .commit } + XCTFail("Expected error") + } catch DatabaseError.SQLITE_BUSY { + } catch { + XCTFail("\(error)") + } + // Let dbQueue1 end its transaction + s1.signal() + } + + _ = group.wait(timeout: .distantFuture) + } - //
TODO: fix flaky test. It fails on Xcode 10.0, tvOS 10.0 -// func testBusyModeTimeoutTooLong() throws { -// let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") -// #if GRDBCIPHER_USE_ENCRYPTION -// // Work around SQLCipher bug when two connections are open to the -// // same empty database: make sure the database is not empty before -// // running this test -// try dbQueue1.inDatabase { db in -// try db.execute(sql: "CREATE TABLE SQLCipherWorkAround (foo INTEGER)") -// } -// #endif -// -// var configuration2 = dbQueue1.configuration -// configuration2.busyMode = .timeout(1) -// let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite", configuration: configuration2) -// -// let s1 = DispatchSemaphore(value: 0) -// let s2 = DispatchSemaphore(value: 0) -// let queue = DispatchQueue.global(priority: .default) -// let group = DispatchGroup() -// -// queue.async(group: group) { -// do { -// try dbQueue1.inTransaction(.exclusive) { db in -// s2.signal() -// queue.asyncAfter(deadline: .now() + 0.1) { -// s1.signal() -// } -// _ = s1.wait(timeout: .distantFuture) -// return .commit -// } -// } catch { -// XCTFail("\(error)") -// } -// } -// -// queue.async(group: group) { -// do { -// _ = s2.wait(timeout: .distantFuture) -// try dbQueue2.inTransaction(.exclusive) { db in return .commit } -// } catch { -// XCTFail("\(error)") -// } -// } -// -// _ = group.wait(timeout: .distantFuture) -// } + func testBusyModeTimeoutTooLong() throws { + let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") + #if GRDBCIPHER_USE_ENCRYPTION + // Work around SQLCipher bug when two connections are open to the + // same empty database: make sure the database is not empty before + // running this test + try dbQueue1.inDatabase { db in + try db.execute(sql: "CREATE TABLE SQLCipherWorkAround (foo INTEGER)") + } + #endif + + var configuration2 = dbQueue1.configuration + configuration2.busyMode = .timeout(1) + let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite", configuration: configuration2) + + let s1 = DispatchSemaphore(value: 0) + let s2 = DispatchSemaphore(value: 0) + let queue = DispatchQueue.global(qos: .default) + let group = DispatchGroup() + + queue.async(group: group) { + do { + try dbQueue1.inTransaction(.exclusive) { db in + // Let dbQueue2 attempt to open an exclusive transaction + s2.signal() + // Wait for a delay and end dbQueue1's transaction + queue.asyncAfter(deadline: .now() + 0.1) { + s1.signal() + } + _ = s1.wait(timeout: .distantFuture) + return .commit + } + } catch { + XCTFail("\(error)") + } + } + + queue.async(group: group) { + do { + // Wait for dbQueue1 to start an exclusive transaction + _ = s2.wait(timeout: .distantFuture) + try dbQueue2.inTransaction(.exclusive) { db in return .commit } + } catch { + XCTFail("\(error)") + } + } + + _ = group.wait(timeout: .distantFuture) + } } diff --git a/Tests/GRDBTests/DatabaseDataDecodingStrategyTests.swift b/Tests/GRDBTests/DatabaseDataDecodingStrategyTests.swift new file mode 100644 index 0000000000..8ff1da13b2 --- /dev/null +++ b/Tests/GRDBTests/DatabaseDataDecodingStrategyTests.swift @@ -0,0 +1,139 @@ +import Foundation +import XCTest +@testable import GRDB // TODO: remove @testable when RowDecodingError is public + +private protocol StrategyProvider { + static var strategy: DatabaseDataDecodingStrategy { get } +} + +private enum StrategyDeferredToData: StrategyProvider { + static let strategy: DatabaseDataDecodingStrategy = .deferredToData +} + +private enum StrategyCustom: StrategyProvider { + static let strategy:
DatabaseDataDecodingStrategy = .custom { dbValue in + if dbValue == "invalid".databaseValue { + return nil + } + return "foo".data(using: .utf8)! + } +} + +private struct RecordWithData<Strategy: StrategyProvider>: FetchableRecord, Decodable { + static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { Strategy.strategy } + var data: Data +} + +private struct RecordWithOptionalData<Strategy: StrategyProvider>: FetchableRecord, Decodable { + static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { Strategy.strategy } + var data: Data? +} + +class DatabaseDataDecodingStrategyTests: GRDBTestCase { + /// test the conversion from a database value to a data extracted from a record + private func test<T: FetchableRecord>( + _ db: Database, + record: T.Type, + data: (T) -> Data?, + databaseValue: (any DatabaseValueConvertible)?, + with test: (Data?) -> Void) throws + { + let request = SQLRequest<T>(sql: "SELECT ? AS data", arguments: [databaseValue]) + do { + // test decoding straight from SQLite + let record = try T.fetchOne(db, request)! + test(data(record)) + } + do { + // test decoding from copied row + let record = try T(row: Row.fetchOne(db, request)!) + test(data(record)) + } + } + + /// test the conversion from a database value to a data with a given strategy + private func test<Strategy: StrategyProvider>( + _ db: Database, + strategy: Strategy.Type, + databaseValue: some DatabaseValueConvertible, + _ test: (Data) -> Void) + throws + { + try self.test(db, record: RecordWithData<Strategy>.self, data: { $0.data }, databaseValue: databaseValue, with: { test($0!) }) + try self.test(db, record: RecordWithOptionalData<Strategy>.self, data: { $0.data }, databaseValue: databaseValue, with: { test($0!) }) + } + + private func testNullDecoding<Strategy: StrategyProvider>(_ db: Database, strategy: Strategy.Type) throws { + try self.test(db, record: RecordWithOptionalData<Strategy>.self, data: { $0.data }, databaseValue: nil) { data in + XCTAssertNil(data) + } + } +} + +// MARK: - deferredToData + +extension DatabaseDataDecodingStrategyTests { + func testDeferredToData() throws { + try makeDatabaseQueue().read { db in + // Null + try testNullDecoding(db, strategy: StrategyDeferredToData.self) + + // Empty string + try test(db, strategy: StrategyDeferredToData.self, databaseValue: "") { data in + XCTAssertEqual(data, Data()) + } + + // String + try test(db, strategy: StrategyDeferredToData.self, databaseValue: "foo") { data in + XCTAssertEqual(data, "foo".data(using: .utf8)) + } + + // Empty blob + try test(db, strategy: StrategyDeferredToData.self, databaseValue: Data()) { data in + XCTAssertEqual(data, Data()) + } + + // Blob + try test(db, strategy: StrategyDeferredToData.self, databaseValue: "foo".data(using: .utf8)) { data in + XCTAssertEqual(data, "foo".data(using: .utf8)) + } + } + } +} + +// MARK: - custom((DatabaseValue) -> Data?) + +extension DatabaseDataDecodingStrategyTests { + func testCustom() throws { + try makeDatabaseQueue().read { db in + // Null + try testNullDecoding(db, strategy: StrategyCustom.self) + + // Data + try test(db, strategy: StrategyCustom.self, databaseValue: "valid") { data in + XCTAssertEqual(data, "foo".data(using: .utf8)!) + } + + // error + do { + try test(db, strategy: StrategyCustom.self, databaseValue: "invalid") { data in + XCTFail("Unexpected Data") + } + } catch let error as RowDecodingError { + switch error { + case .valueMismatch: + XCTAssertEqual(error.description, """ + could not decode Data from database value "invalid" - \ + column: "data", \ + column index: 0, \ + row: [data:"invalid"], \ + sql: `SELECT ?
AS data`, \ + arguments: ["invalid"] + """) + default: + XCTFail("Unexpected Error") + } + } + } + } +} diff --git a/Tests/GRDBTests/DatabaseDataEncodingStrategyTests.swift b/Tests/GRDBTests/DatabaseDataEncodingStrategyTests.swift new file mode 100644 index 0000000000..6fdf7d8759 --- /dev/null +++ b/Tests/GRDBTests/DatabaseDataEncodingStrategyTests.swift @@ -0,0 +1,309 @@ +import XCTest +import Foundation +@testable import GRDB + +private protocol StrategyProvider { + static var strategy: DatabaseDataEncodingStrategy { get } +} + +private enum StrategyDeferredToData: StrategyProvider { + static let strategy: DatabaseDataEncodingStrategy = .deferredToData +} + +private enum StrategyTextUTF8: StrategyProvider { + static let strategy: DatabaseDataEncodingStrategy = .text +} + +private enum StrategyCustom: StrategyProvider { + static let strategy: DatabaseDataEncodingStrategy = .custom { _ in "custom" } +} + +private struct RecordWithData<Strategy: StrategyProvider>: EncodableRecord, Encodable { + static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { Strategy.strategy } + var data: Data +} + +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) +extension RecordWithData: Identifiable { + var id: Data { data } +} + +private struct RecordWithOptionalData<Strategy: StrategyProvider>: EncodableRecord, Encodable { + static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { Strategy.strategy } + var data: Data? +} + +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) +extension RecordWithOptionalData: Identifiable { + var id: Data? { data } +} + +class DatabaseDataEncodingStrategyTests: GRDBTestCase { + let testedDatas = [ + "foo".data(using: .utf8)!, + Data(), + ] + + private func test<T: EncodableRecord>( + record: T, + expectedStorage: DatabaseValue.Storage) + throws + { + var container = PersistenceContainer() + try record.encode(to: &container) + if let dbValue = container["data"]?.databaseValue { + XCTAssertEqual(dbValue.storage, expectedStorage) + } else { + XCTAssertEqual(.null, expectedStorage) + } + } + + private func test<Strategy: StrategyProvider>( + strategy: Strategy.Type, + encodesData data: Data, + as value: some DatabaseValueConvertible) + throws + { + try test(record: RecordWithData<Strategy>(data: data), expectedStorage: value.databaseValue.storage) + try test(record: RecordWithOptionalData<Strategy>(data: data), expectedStorage: value.databaseValue.storage) + } + + private func testNullEncoding<Strategy: StrategyProvider>(strategy: Strategy.Type) throws { + try test(record: RecordWithOptionalData<Strategy>(data: nil), expectedStorage: .null) + } +} + +// MARK: - deferredToData + +extension DatabaseDataEncodingStrategyTests { + func testDeferredToData() throws { + try testNullEncoding(strategy: StrategyDeferredToData.self) + + for (data, value) in zip(testedDatas, [ + "foo".data(using: .utf8)!, + Data(), + ]) { try test(strategy: StrategyDeferredToData.self, encodesData: data, as: value) } + } +} + +// MARK: - text(UTF8) + +extension DatabaseDataEncodingStrategyTests { + func testTextUTF8() throws { + try testNullEncoding(strategy: StrategyTextUTF8.self) + + for (data, value) in zip(testedDatas, [ + "foo", + "", + ]) { try test(strategy: StrategyTextUTF8.self, encodesData: data, as: value) } + } +} + +// MARK: - custom((Data) -> DatabaseValueConvertible?)
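// A small sketch, not part of this patch, of what these strategies change:
// an EncodableRecord type picks how its Data properties reach the database
// by overriding `databaseDataEncodingStrategy`, as the records above do.
// The `Attachment` record and its columns are hypothetical.
import Foundation
import GRDB

struct Attachment: Codable, PersistableRecord {
    // Store `payload` as UTF-8 TEXT instead of the default BLOB.
    static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { .text }
    var id: Int64
    var payload: Data
}

// Usage: try dbQueue.write { try Attachment(id: 1, payload: Data("hi".utf8)).insert($0) }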
+ +extension DatabaseDataEncodingStrategyTests { + func testCustom() throws { + try testNullEncoding(strategy: StrategyCustom.self) + + for (data, value) in zip(testedDatas, [ + "custom", + "custom", + ]) { try test(strategy: StrategyCustom.self, encodesData: data, as: value) } + } +} + +// MARK: - Filter + +extension DatabaseDataEncodingStrategyTests { + func testFilterKey() throws { + try makeDatabaseQueue().write { db in + try db.create(table: "t") { $0.primaryKey("id", .blob) } + + do { + let request = Table>("t").filter(key: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = x'666f6f' + """) + } + + do { + let request = Table>("t").filter(keys: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN (x'666f6f', x'') + """) + } + + do { + let request = Table>("t").filter(key: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = 'foo' + """) + } + + do { + let request = Table>("t").filter(keys: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN ('foo', '') + """) + } + } + } + + func testFilterID() throws { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { + throw XCTSkip("Identifiable not available") + } + + try makeDatabaseQueue().write { db in + try db.create(table: "t") { $0.primaryKey("id", .blob) } + + do { + let request = Table>("t").filter(id: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = x'666f6f' + """) + } + + do { + let request = Table>("t").filter(ids: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN (x'666f6f', x'') + """) + } + + do { + let request = Table>("t").filter(id: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = 'foo' + """) + } + + do { + let request = Table>("t").filter(ids: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN ('foo', '') + """) + } + + do { + let request = Table>("t").filter(id: nil) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE 0 + """) + } + + do { + let request = Table>("t").filter(id: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = x'666f6f' + """) + } + + do { + let request = Table>("t").filter(ids: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN (x'666f6f', x'') + """) + } + + do { + let request = Table>("t").filter(id: nil) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE 0 + """) + } + + do { + let request = Table>("t").filter(id: testedDatas[0]) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" = 'foo' + """) + } + + do { + let request = Table>("t").filter(ids: testedDatas) + try assertEqualSQL(db, request, """ + SELECT * FROM "t" WHERE "id" IN ('foo', '') + """) + } + } + } + + func testDeleteID() throws { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { + throw XCTSkip("Identifiable not available") + } + + try makeDatabaseQueue().write { db in + try db.create(table: "t") { $0.primaryKey("id", .blob) } + + do { + try Table>("t").deleteOne(db, id: testedDatas[0]) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" = x'666f6f' + """) + } + + do { + try Table>("t").deleteAll(db, ids: testedDatas) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" IN (x'666f6f', x'') + """) + } + + do { + try Table>("t").deleteOne(db, id: testedDatas[0]) + XCTAssertEqual(lastSQLQuery, """ 
+ DELETE FROM "t" WHERE "id" = 'foo' + """) + } + + do { + try Table>("t").deleteAll(db, ids: testedDatas) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" IN ('foo', '') + """) + } + + do { + sqlQueries.removeAll() + try Table>("t").deleteOne(db, id: nil) + XCTAssertNil(lastSQLQuery) // Database not hit + } + + do { + try Table>("t").deleteOne(db, id: testedDatas[0]) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" = x'666f6f' + """) + } + + do { + try Table>("t").deleteAll(db, ids: testedDatas) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" IN (x'666f6f', x'') + """) + } + + do { + sqlQueries.removeAll() + try Table>("t").deleteOne(db, id: nil) + XCTAssertNil(lastSQLQuery) // Database not hit + } + + do { + try Table>("t").deleteOne(db, id: testedDatas[0]) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" = 'foo' + """) + } + + do { + try Table>("t").deleteAll(db, ids: testedDatas) + XCTAssertEqual(lastSQLQuery, """ + DELETE FROM "t" WHERE "id" IN ('foo', '') + """) + } + } + } +} diff --git a/Tests/GRDBTests/DatabaseDateDecodingStrategyTests.swift b/Tests/GRDBTests/DatabaseDateDecodingStrategyTests.swift index fb97465bc6..5f920fe736 100644 --- a/Tests/GRDBTests/DatabaseDateDecodingStrategyTests.swift +++ b/Tests/GRDBTests/DatabaseDateDecodingStrategyTests.swift @@ -62,7 +62,7 @@ class DatabaseDateDecodingStrategyTests: GRDBTestCase { _ db: Database, record: T.Type, date: (T) -> Date?, - databaseValue: DatabaseValueConvertible?, + databaseValue: (any DatabaseValueConvertible)?, with test: (Date?) -> Void) throws { let request = SQLRequest(sql: "SELECT ? AS date", arguments: [databaseValue]) @@ -79,7 +79,13 @@ class DatabaseDateDecodingStrategyTests: GRDBTestCase { } /// test the conversion from a database value to a date with a given strategy - private func test(_ db: Database, strategy: Strategy.Type, databaseValue: DatabaseValueConvertible, _ test: (Date) -> Void) throws { + private func test( + _ db: Database, + strategy: Strategy.Type, + databaseValue: some DatabaseValueConvertible, + _ test: (Date) -> Void) + throws + { try self.test(db, record: RecordWithDate.self, date: { $0.date }, databaseValue: databaseValue, with: { test($0!) }) try self.test(db, record: RecordWithOptionalDate.self, date: { $0.date }, databaseValue: databaseValue, with: { test($0!) }) } diff --git a/Tests/GRDBTests/DatabaseDateEncodingStrategyTests.swift b/Tests/GRDBTests/DatabaseDateEncodingStrategyTests.swift index aa4b996fa4..98b1ab605b 100644 --- a/Tests/GRDBTests/DatabaseDateEncodingStrategyTests.swift +++ b/Tests/GRDBTests/DatabaseDateEncodingStrategyTests.swift @@ -36,7 +36,7 @@ private enum StrategyFormatted: StrategyProvider { formatter.locale = Locale(identifier: "en_US_POSIX") formatter.timeZone = TimeZone(secondsFromGMT: 0)! formatter.dateStyle = .full - formatter.timeStyle = .medium + formatter.timeStyle = .none return formatter }()) } @@ -50,7 +50,7 @@ private struct RecordWithDate: EncodableRecord, Enco var date: Date } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension RecordWithDate: Identifiable { var id: Date { date } } @@ -60,7 +60,7 @@ private struct RecordWithOptionalDate: EncodableReco var date: Date? } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension RecordWithOptionalDate: Identifiable { var id: Date? 
{ date } } @@ -87,7 +87,12 @@ class DatabaseDateEncodingStrategyTests: GRDBTestCase { } } - private func test(strategy: Strategy.Type, encodesDate date: Date, as value: DatabaseValueConvertible) throws { + private func test( + strategy: Strategy.Type, + encodesDate date: Date, + as value: some DatabaseValueConvertible) + throws + { try test(record: RecordWithDate(date: date), expectedStorage: value.databaseValue.storage) try test(record: RecordWithOptionalDate(date: date), expectedStorage: value.databaseValue.storage) } @@ -194,10 +199,10 @@ extension DatabaseDateEncodingStrategyTests { try testNullEncoding(strategy: StrategyFormatted.self) for (date, value) in zip(testedDates, [ - "Saturday, December 20, 1969 at 1:39:05 PM", - "Friday, January 2, 1970 at 10:17:36 AM", - "Monday, January 1, 2001 at 12:00:00 AM", - "Tuesday, January 2, 2001 at 10:17:36 AM", + "Saturday, December 20, 1969", + "Friday, January 2, 1970", + "Monday, January 1, 2001", + "Tuesday, January 2, 2001", ]) { try test(strategy: StrategyFormatted.self, encodesDate: date, as: value) } } } @@ -255,7 +260,7 @@ extension DatabaseDateEncodingStrategyTests { } func testFilterID() throws { - guard #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Identifiable not available") } @@ -335,7 +340,7 @@ extension DatabaseDateEncodingStrategyTests { } func testDeleteID() throws { - guard #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Identifiable not available") } diff --git a/Tests/GRDBTests/DatabaseDumpTests.swift b/Tests/GRDBTests/DatabaseDumpTests.swift new file mode 100644 index 0000000000..c0551a0e88 --- /dev/null +++ b/Tests/GRDBTests/DatabaseDumpTests.swift @@ -0,0 +1,1646 @@ +import XCTest +import GRDB + +private final class TestStream: TextOutputStream { + var output: String + + init() { + output = "" + } + + func write(_ string: String) { + output.append(string) + } +} + +private struct Player: Codable, MutablePersistableRecord { + static let team = belongsTo(Team.self) + var id: Int64? + var name: String + var teamId: String? + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } +} + +private struct Team: Codable, PersistableRecord { + static let players = hasMany(Player.self) + var id: String + var name: String + var color: String +} + +final class DatabaseDumpTests: GRDBTestCase { + // MARK: - Debug + + func test_debug_value_formatting() throws { + try makeValuesDatabase().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT * FROM value ORDER BY name", format: .debug(), to: stream) + XCTAssertEqual(stream.output, """ + blob: ascii apostrophe|['] + blob: ascii double quote|["] + blob: ascii line feed|[ + ] + blob: ascii long|Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. 
+ blob: ascii short|Hello + blob: ascii tab|[\t] + blob: binary short|X'80' + blob: empty| + blob: utf8 short|您好🙂 + blob: uuid|69BF8A9C-D9F0-4777-BD11-93451D84CBCF + double: -1.0|-1.0 + double: -inf|-inf + double: 0.0|0.0 + double: 123.45|123.45 + double: inf|inf + double: nan| + integer: -1|-1 + integer: 0|0 + integer: 123|123 + integer: max|9223372036854775807 + integer: min|-9223372036854775808 + null| + text: ascii apostrophe|['] + text: ascii backslash|[\\] + text: ascii double quote|["] + text: ascii line feed|[ + ] + text: ascii long|Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. + text: ascii short|Hello + text: ascii slash|[/] + text: ascii tab|[\t] + text: ascii url|https://github.com/groue/GRDB.swift + text: empty| + text: utf8 short|您好🙂 + + """) + } + } + + func test_debug_empty_results() throws { + try makeDatabaseQueue().write { db in + do { + // Columns + let stream = TestStream() + try db.dumpSQL("SELECT NULL WHERE NULL", format: .debug(), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // No columns + let stream = TestStream() + try db.dumpSQL("CREATE TABLE t(a)", format: .debug(), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_debug_headers() throws { + try makeRugbyDatabase().read { db in + do { + // Headers on + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .debug(header: true), to: stream) + XCTAssertEqual(stream.output, """ + id|teamId|name + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Headers on, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .debug(header: true), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // Headers off + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .debug(header: false), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Headers off, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .debug(header: false), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_debug_duplicate_columns() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT 1 AS name, 'foo' AS name", format: .debug(header: true), to: stream) + XCTAssertEqual(stream.output, """ + name|name + 1|foo + + """) + } + } + + func test_debug_multiple_statements() throws { + try makeDatabaseQueue().write { db in + let stream = TestStream() + try db.dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .debug(), + to: stream) + XCTAssertEqual(stream.output, """ + 1|foo + 2|bar + bar + foo + + """) + } + } + + func test_debug_separator() throws { + try makeRugbyDatabase().read { db in + do { + // Default separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .debug(header: true), to: stream) + XCTAssertEqual(stream.output, """ + id|teamId|name + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", 
format: .debug(header: true, separator: "---"), to: stream) + XCTAssertEqual(stream.output, """ + id---teamId---name + 1---FRA---Antoine Dupond + 2---ENG---Owen Farrell + 3------Gwendal Roué + + """) + } + } + } + + func test_debug_nullValue() throws { + try makeRugbyDatabase().read { db in + do { + // Default null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .debug(), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .debug(nullValue: "NULL"), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3|NULL|Gwendal Roué + + """) + } + } + } + + // MARK: - JSON + + func test_json_value_formatting() throws { + guard #available(iOS 13.0, macOS 10.15, tvOS 13.0, watchOS 6.0, *) else { + throw XCTSkip("Skip because this test relies on JSONEncoder.OutputFormatting.withoutEscapingSlashes") + } + + try makeValuesDatabase().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT * FROM value ORDER BY name", format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"name":"blob: ascii apostrophe","value":"Wydd"}, + {"name":"blob: ascii double quote","value":"WyJd"}, + {"name":"blob: ascii line feed","value":"Wwpd"}, + {"name":"blob: ascii long","value":"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gTW9yYmkgdHJpc3RpcXVlIHRlbXBvciBjb25kaW1lbnR1bS4gUGVsbGVudGVzcXVlIHBoYXJldHJhIGxhY3VzIG5vbiBhbnRlIHNvbGxpY2l0dWRpbiBhdWN0b3IuIFZlc3RpYnVsdW0gc2l0IGFtZXQgbWF1cmlzIHZpdGFlIHVybmEgbm9uIGx1Y3R1cy4="}, + {"name":"blob: ascii short","value":"SGVsbG8="}, + {"name":"blob: ascii tab","value":"Wwld"}, + {"name":"blob: binary short","value":"gA=="}, + {"name":"blob: empty","value":""}, + {"name":"blob: utf8 short","value":"5oKo5aW98J+Zgg=="}, + {"name":"blob: uuid","value":"ab+KnNnwR3e9EZNFHYTLzw=="}, + {"name":"double: -1.0","value":-1}, + {"name":"double: -inf","value":"-inf"}, + {"name":"double: 0.0","value":0}, + {"name":"double: 123.45","value":123.45}, + {"name":"double: inf","value":"inf"}, + {"name":"double: nan","value":null}, + {"name":"integer: -1","value":-1}, + {"name":"integer: 0","value":0}, + {"name":"integer: 123","value":123}, + {"name":"integer: max","value":9223372036854775807}, + {"name":"integer: min","value":-9223372036854775808}, + {"name":"null","value":null}, + {"name":"text: ascii apostrophe","value":"[']"}, + {"name":"text: ascii backslash","value":"[\\\\]"}, + {"name":"text: ascii double quote","value":"[\\"]"}, + {"name":"text: ascii line feed","value":"[\\n]"}, + {"name":"text: ascii long","value":"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. 
Vestibulum sit amet mauris vitae urna non luctus."}, + {"name":"text: ascii short","value":"Hello"}, + {"name":"text: ascii slash","value":"[/]"}, + {"name":"text: ascii tab","value":"[\\t]"}, + {"name":"text: ascii url","value":"https://github.com/groue/GRDB.swift"}, + {"name":"text: empty","value":""}, + {"name":"text: utf8 short","value":"您好🙂"}] + + """) + } + } + + func test_json_empty_results() throws { + try makeDatabaseQueue().write { db in + do { + // Columns + let stream = TestStream() + try db.dumpSQL("SELECT NULL WHERE NULL", format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [] + + """) + } + do { + // No columns + let stream = TestStream() + try db.dumpSQL("CREATE TABLE t(a)", format: .json(), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_json_duplicate_columns() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT 1 AS name, 'foo' AS name", format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"name":1,"name":"foo"}] + + """) + } + } + + func test_json_multiple_statements() throws { + try makeDatabaseQueue().write { db in + let stream = TestStream() + try db.dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .json(), + to: stream) + XCTAssertEqual(stream.output, """ + [{"a":1,"b":"foo"}, + {"a":2,"b":"bar"}] + [{"b":"bar"}, + {"b":"foo"}] + [] + + """) + } + } + + func test_json_custom_encoder() throws { + try makeDatabaseQueue().write { db in + try db.execute(literal: """ + CREATE TABLE t(id INTEGER PRIMARY KEY, name TEXT, value DOUBLE); + INSERT INTO t VALUES (1, 'a', 0.0); + INSERT INTO t VALUES (2, 'b', \(1.0 / 0)); + INSERT INTO t VALUES (3, 'c', \(-1.0 / 0)); + """) + let encoder = JSONDumpFormat.defaultEncoder + encoder.nonConformingFloatEncodingStrategy = .convertToString( + positiveInfinity: "much too much", + negativeInfinity: "whoops", + nan: "") + encoder.outputFormatting = [.prettyPrinted, .sortedKeys /* ignored */] + let stream = TestStream() + try db.dumpSQL("SELECT * FROM t ORDER BY id", format: .json(encoder: encoder), to: stream) + XCTAssertEqual(stream.output, """ + [ + { + "id":1, + "name":"a", + "value":0 + }, + { + "id":2, + "name":"b", + "value":"much too much" + }, + { + "id":3, + "name":"c", + "value":"whoops" + } + ] + + """) + } + } + + // MARK: - Line + + func test_line_value_formatting() throws { + try makeValuesDatabase().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT * FROM value ORDER BY name", format: .line(), to: stream) + XCTAssertEqual(stream.output, """ + name = blob: ascii apostrophe + value = ['] + + name = blob: ascii double quote + value = ["] + + name = blob: ascii line feed + value = [ + ] + + name = blob: ascii long + value = Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. 
+ + name = blob: ascii short + value = Hello + + name = blob: ascii tab + value = [\t] + + name = blob: binary short + value = � + + name = blob: empty + value = \n\ + + name = blob: utf8 short + value = 您好🙂 + + name = blob: uuid + value = \("i\u{fffd}\u{fffd}\u{fffd}\u{fffd}\u{fffd}Gw\u{fffd}\u{11}\u{fffd}E\u{1d}\u{fffd}\u{fffd}\u{fffd}") + + name = double: -1.0 + value = -1.0 + + name = double: -inf + value = -inf + + name = double: 0.0 + value = 0.0 + + name = double: 123.45 + value = 123.45 + + name = double: inf + value = inf + + name = double: nan + value = \n\ + + name = integer: -1 + value = -1 + + name = integer: 0 + value = 0 + + name = integer: 123 + value = 123 + + name = integer: max + value = 9223372036854775807 + + name = integer: min + value = -9223372036854775808 + + name = null + value = \n\ + + name = text: ascii apostrophe + value = ['] + + name = text: ascii backslash + value = [\\] + + name = text: ascii double quote + value = ["] + + name = text: ascii line feed + value = [ + ] + + name = text: ascii long + value = Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. + + name = text: ascii short + value = Hello + + name = text: ascii slash + value = [/] + + name = text: ascii tab + value = [\t] + + name = text: ascii url + value = https://github.com/groue/GRDB.swift + + name = text: empty + value = \n\ + + name = text: utf8 short + value = 您好🙂 + + """) + } + } + + func test_line_empty_results() throws { + try makeDatabaseQueue().write { db in + do { + // Columns + let stream = TestStream() + try db.dumpSQL("SELECT NULL WHERE NULL", format: .line(), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // No columns + let stream = TestStream() + try db.dumpSQL("CREATE TABLE t(a)", format: .line(), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_line_duplicate_columns() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT 1 AS name, 'foo' AS name", format: .line(), to: stream) + XCTAssertEqual(stream.output, """ + name = 1 + name = foo + + """) + } + } + + func test_line_multiple_statements() throws { + try makeDatabaseQueue().write { db in + let stream = TestStream() + try db.dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .line(), + to: stream) + XCTAssertEqual(stream.output, """ + a = 1 + b = foo + + a = 2 + b = bar + + b = bar + + b = foo + + """) + } + } + + func test_line_nullValue() throws { + try makeRugbyDatabase().read { db in + do { + // Default null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .line(), to: stream) + XCTAssertEqual(stream.output, """ + id = 1 + teamId = FRA + name = Antoine Dupond + + id = 2 + teamId = ENG + name = Owen Farrell + + id = 3 + teamId = \n\ + name = Gwendal Roué + + """) + } + do { + // Custom null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .line(nullValue: "NULL"), to: stream) + XCTAssertEqual(stream.output, """ + id = 1 + teamId = FRA + name = Antoine Dupond + + id = 2 + teamId = ENG + name = Owen Farrell + + id = 3 + teamId = NULL + name = Gwendal Roué + + """) + } + } + } + + // MARK: - List + + func test_list_value_formatting() throws { + try 
makeValuesDatabase().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT * FROM value ORDER BY name", format: .list(), to: stream) + XCTAssertEqual(stream.output, """ + blob: ascii apostrophe|['] + blob: ascii double quote|["] + blob: ascii line feed|[ + ] + blob: ascii long|Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. + blob: ascii short|Hello + blob: ascii tab|[\t] + blob: binary short|� + blob: empty| + blob: utf8 short|您好🙂 + blob: uuid|\("i\u{fffd}\u{fffd}\u{fffd}\u{fffd}\u{fffd}Gw\u{fffd}\u{11}\u{fffd}E\u{1d}\u{fffd}\u{fffd}\u{fffd}") + double: -1.0|-1.0 + double: -inf|-inf + double: 0.0|0.0 + double: 123.45|123.45 + double: inf|inf + double: nan| + integer: -1|-1 + integer: 0|0 + integer: 123|123 + integer: max|9223372036854775807 + integer: min|-9223372036854775808 + null| + text: ascii apostrophe|['] + text: ascii backslash|[\\] + text: ascii double quote|["] + text: ascii line feed|[ + ] + text: ascii long|Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus. + text: ascii short|Hello + text: ascii slash|[/] + text: ascii tab|[\t] + text: ascii url|https://github.com/groue/GRDB.swift + text: empty| + text: utf8 short|您好🙂 + + """) + } + } + + func test_list_empty_results() throws { + try makeDatabaseQueue().write { db in + do { + // Columns + let stream = TestStream() + try db.dumpSQL("SELECT NULL WHERE NULL", format: .list(), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // No columns + let stream = TestStream() + try db.dumpSQL("CREATE TABLE t(a)", format: .list(), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_list_headers() throws { + try makeRugbyDatabase().read { db in + do { + // Headers on + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(header: true), to: stream) + XCTAssertEqual(stream.output, """ + id|teamId|name + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Headers on, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .list(header: true), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // Headers off + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(header: false), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Headers off, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .list(header: false), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_list_duplicate_columns() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT 1 AS name, 'foo' AS name", format: .list(header: true), to: stream) + XCTAssertEqual(stream.output, """ + name|name + 1|foo + + """) + } + } + + func test_list_multiple_statements() throws { + try makeDatabaseQueue().write { db in + let stream = TestStream() + try db.dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .list(), + to: stream) + 
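// An illustrative sketch, not part of this patch: `dumpSQL` writes to any
// TextOutputStream, so output can be captured in memory — as the TestStream
// class above does — or redirected elsewhere. Names here are hypothetical.
import GRDB

final class CaptureStream: TextOutputStream {
    private(set) var output = ""
    func write(_ string: String) { output.append(string) }
}

func listPlayers(_ db: Database) throws -> String {
    let stream = CaptureStream()
    try db.dumpSQL("SELECT * FROM player ORDER BY id",
                   format: .list(header: true),
                   to: stream)
    return stream.output
}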
XCTAssertEqual(stream.output, """ + 1|foo + 2|bar + bar + foo + + """) + } + } + + func test_list_separator() throws { + try makeRugbyDatabase().read { db in + do { + // Default separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(header: true), to: stream) + XCTAssertEqual(stream.output, """ + id|teamId|name + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(header: true, separator: "---"), to: stream) + XCTAssertEqual(stream.output, """ + id---teamId---name + 1---FRA---Antoine Dupond + 2---ENG---Owen Farrell + 3------Gwendal Roué + + """) + } + } + } + + func test_list_nullValue() throws { + try makeRugbyDatabase().read { db in + do { + // Default null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom null + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .list(nullValue: "NULL"), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3|NULL|Gwendal Roué + + """) + } + } + } + + // MARK: - Quote + + func test_quote_value_formatting() throws { + try makeValuesDatabase().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT * FROM value ORDER BY name", format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + 'blob: ascii apostrophe',X'5B275D' + 'blob: ascii double quote',X'5B225D' + 'blob: ascii line feed',X'5B0A5D' + 'blob: ascii long',X'4C6F72656D20697073756D20646F6C6F722073697420616D65742C20636F6E73656374657475722061646970697363696E6720656C69742E204D6F726269207472697374697175652074656D706F7220636F6E64696D656E74756D2E2050656C6C656E746573717565207068617265747261206C61637573206E6F6E20616E746520736F6C6C696369747564696E20617563746F722E20566573746962756C756D2073697420616D6574206D61757269732076697461652075726E61206E6F6E206C75637475732E' + 'blob: ascii short',X'48656C6C6F' + 'blob: ascii tab',X'5B095D' + 'blob: binary short',X'80' + 'blob: empty',X'' + 'blob: utf8 short',X'E682A8E5A5BDF09F9982' + 'blob: uuid',X'69BF8A9CD9F04777BD1193451D84CBCF' + 'double: -1.0',-1.0 + 'double: -inf',-Inf + 'double: 0.0',0.0 + 'double: 123.45',123.45 + 'double: inf',Inf + 'double: nan',NULL + 'integer: -1',-1 + 'integer: 0',0 + 'integer: 123',123 + 'integer: max',9223372036854775807 + 'integer: min',-9223372036854775808 + 'null',NULL + 'text: ascii apostrophe','['']' + 'text: ascii backslash','[\\]' + 'text: ascii double quote','["]' + 'text: ascii line feed','[ + ]' + 'text: ascii long','Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus.' 
+ 'text: ascii short','Hello' + 'text: ascii slash','[/]' + 'text: ascii tab','[\t]' + 'text: ascii url','https://github.com/groue/GRDB.swift' + 'text: empty','' + 'text: utf8 short','您好🙂' + + """) + } + } + + func test_quote_empty_results() throws { + try makeDatabaseQueue().write { db in + do { + // Columns + let stream = TestStream() + try db.dumpSQL("SELECT NULL WHERE NULL", format: .quote(), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // No columns + let stream = TestStream() + try db.dumpSQL("CREATE TABLE t(a)", format: .quote(), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_quote_headers() throws { + try makeRugbyDatabase().read { db in + do { + // Headers on + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .quote(header: true), to: stream) + XCTAssertEqual(stream.output, """ + 'id','teamId','name' + 1,'FRA','Antoine Dupond' + 2,'ENG','Owen Farrell' + 3,NULL,'Gwendal Roué' + + """) + } + do { + // Headers on, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .quote(header: true), to: stream) + XCTAssertEqual(stream.output, "") + } + do { + // Headers off + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .quote(header: false), to: stream) + XCTAssertEqual(stream.output, """ + 1,'FRA','Antoine Dupond' + 2,'ENG','Owen Farrell' + 3,NULL,'Gwendal Roué' + + """) + } + do { + // Headers off, no result + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player WHERE 0", format: .quote(header: false), to: stream) + XCTAssertEqual(stream.output, "") + } + } + } + + func test_quote_duplicate_columns() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSQL("SELECT 1 AS name, 'foo' AS name", format: .quote(header: true), to: stream) + XCTAssertEqual(stream.output, """ + 'name','name' + 1,'foo' + + """) + } + } + + func test_quote_multiple_statements() throws { + try makeDatabaseQueue().write { db in + let stream = TestStream() + try db.dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .quote(), + to: stream) + XCTAssertEqual(stream.output, """ + 1,'foo' + 2,'bar' + 'bar' + 'foo' + + """) + } + } + + func test_quote_separator() throws { + try makeRugbyDatabase().read { db in + do { + // Default separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .quote(header: true), to: stream) + XCTAssertEqual(stream.output, """ + 'id','teamId','name' + 1,'FRA','Antoine Dupond' + 2,'ENG','Owen Farrell' + 3,NULL,'Gwendal Roué' + + """) + } + do { + // Custom separator + let stream = TestStream() + try db.dumpSQL("SELECT * FROM player ORDER BY id", format: .quote(header: true, separator: "---"), to: stream) + XCTAssertEqual(stream.output, """ + 'id'---'teamId'---'name' + 1---'FRA'---'Antoine Dupond' + 2---'ENG'---'Owen Farrell' + 3---NULL---'Gwendal Roué' + + """) + } + } + } + + // MARK: - Dump error + + func test_dumpError() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + do { + try db.dumpSQL( + """ + SELECT 'Hello'; + Not sql; + """, + to: stream) + XCTFail("Expected error") + } catch { + XCTAssertEqual(stream.output, """ + Hello + + """) + } + } + } + + // MARK: - Request dump + + func test_dumpRequest() throws { + try makeRugbyDatabase().read { db in + do { + // 
Default format + let stream = TestStream() + try db.dumpRequest(Player.orderByPrimaryKey(), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpRequest(Player.orderByPrimaryKey(), format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + """) + } + } + } + + func test_dumpRequest_association_to_one() throws { + try makeRugbyDatabase().read { db in + let request = Player.orderByPrimaryKey().including(required: Player.team) + + do { + // Default format + let stream = TestStream() + try db.dumpRequest(request, to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond|FRA|XV de France|blue + 2|ENG|Owen Farrell|ENG|England Rugby|white + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpRequest(request, format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"id":1,"teamId":"FRA","name":"Antoine Dupond","id":"FRA","name":"XV de France","color":"blue"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell","id":"ENG","name":"England Rugby","color":"white"}] + + """) + } + } + } + + func test_dumpRequest_association_to_many() throws { + try makeRugbyDatabase().read { db in + let request = Team.orderByPrimaryKey().including(all: Team.players.orderByPrimaryKey()) + + do { + // Default format + let stream = TestStream() + try db.dumpRequest(request, to: stream) + XCTAssertEqual(stream.output, """ + ENG|England Rugby|white + FRA|XV de France|blue + + players + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpRequest(request, format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"id":"ENG","name":"England Rugby","color":"white"}, + {"id":"FRA","name":"XV de France","color":"blue"}] + + players + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}] + + """) + } + } + } + + // MARK: - Table dump + + func test_dumpTables_zero() throws { + try makeRugbyDatabase().read { db in + let stream = TestStream() + try db.dumpTables([], to: stream) + XCTAssertEqual(stream.output, "") + } + } + + func test_dumpTables_single_table() throws { + try makeRugbyDatabase().read { db in + do { + // Default format + let stream = TestStream() + try db.dumpTables(["player"], to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpTables(["player"], format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + """) + } + } + } + + func test_dumpTables_single_view() throws { + try makeRugbyDatabase().write { db in + try db.create(view: "playerName", as: Player + .orderByPrimaryKey() + .select(Column("name"))) + + do { + // Default order: use the view ordering + do { + // Default format + let stream = TestStream() + try db.dumpTables(["playerName"], to: stream) + XCTAssertEqual(stream.output, """ + Antoine Dupond + Owen Farrell + Gwendal Roué + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpTables(["playerName"], format: .json(), to: stream) + 
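// The JSON dump renders a single array, one row object per line. +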
XCTAssertEqual(stream.output, """ + [{"name":"Antoine Dupond"}, + {"name":"Owen Farrell"}, + {"name":"Gwendal Roué"}] + + """) + } + } + + do { + // Stable order + do { + // Default format + let stream = TestStream() + try db.dumpTables(["playerName"], stableOrder: true, to: stream) + XCTAssertEqual(stream.output, """ + Antoine Dupond + Gwendal Roué + Owen Farrell + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpTables(["playerName"], format: .json(), stableOrder: true, to: stream) + XCTAssertEqual(stream.output, """ + [{"name":"Antoine Dupond"}, + {"name":"Gwendal Roué"}, + {"name":"Owen Farrell"}] + + """) + } + } + } + } + + func test_dumpTables_multiple() throws { + try makeRugbyDatabase().read { db in + do { + // Default format + let stream = TestStream() + try db.dumpTables(["player", "team"], to: stream) + XCTAssertEqual(stream.output, """ + player + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + team + ENG|England Rugby|white + FRA|XV de France|blue + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpTables(["team", "player"], format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + team + [{"id":"ENG","name":"England Rugby","color":"white"}, + {"id":"FRA","name":"XV de France","color":"blue"}] + + player + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + """) + } + } + } + + // MARK: - Database schema dump + + func test_dumpSchema() throws { + try makeRugbyDatabase().read { db in + let stream = TestStream() + try db.dumpSchema(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "teamId" TEXT REFERENCES "team"("id"), "name" TEXT NOT NULL); + CREATE INDEX "player_on_teamId" ON "player"("teamId"); + CREATE TABLE "team" ("id" TEXT PRIMARY KEY NOT NULL, "name" TEXT NOT NULL, "color" TEXT NOT NULL); + + """) + } + } + + func test_dumpSchema_empty_database() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpSchema(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + + """) + } + } + + func test_dumpSchema_empty_tables() throws { + try makeDatabaseQueue().write { db in + try db.execute(literal: """ + CREATE TABLE blue(name); + CREATE TABLE red(name); + CREATE TABLE yellow(name); + INSERT INTO red VALUES ('vermillon') + """) + let stream = TestStream() + try db.dumpSchema(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE blue(name); + CREATE TABLE red(name); + CREATE TABLE yellow(name); + + """) + } + } + + func test_dumpSchema_sqlite_master_ordering() throws { + try makeDatabaseQueue().write { db in + try db.execute(literal: """ + CREATE TABLE blue(name); + CREATE TABLE RED(name); + CREATE TABLE yellow(name); + CREATE INDEX index_blue1 ON blue(name); + CREATE INDEX INDEX_blue2 ON blue(name); + CREATE INDEX indexRed1 ON RED(name); + CREATE INDEX INDEXRed2 ON RED(name); + CREATE VIEW colors1 AS SELECT name FROM blue; + CREATE VIEW COLORS2 AS SELECT name FROM blue UNION SELECT name FROM yellow; + CREATE TRIGGER update_blue UPDATE OF name ON blue + BEGIN + DELETE FROM RED; + END; + CREATE TRIGGER update_RED UPDATE OF name ON RED + BEGIN + DELETE FROM yellow; + END; + """) + let stream = TestStream() + try db.dumpSchema(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE blue(name); + CREATE INDEX index_blue1 ON blue(name); + 
CREATE INDEX INDEX_blue2 ON blue(name); + CREATE TRIGGER update_blue UPDATE OF name ON blue + BEGIN + DELETE FROM RED; + END; + CREATE VIEW colors1 AS SELECT name FROM blue; + CREATE VIEW COLORS2 AS SELECT name FROM blue UNION SELECT name FROM yellow; + CREATE TABLE RED(name); + CREATE INDEX indexRed1 ON RED(name); + CREATE INDEX INDEXRed2 ON RED(name); + CREATE TRIGGER update_RED UPDATE OF name ON RED + BEGIN + DELETE FROM yellow; + END; + CREATE TABLE yellow(name); + + """) + } + } + + func test_dumpSchema_ignores_shadow_tables() throws { + guard sqlite3_libversion_number() >= 3037000 else { + throw XCTSkip("Can't detect shadow tables") + } + + try makeDatabaseQueue().write { db in + try db.create(table: "document") { t in + t.autoIncrementedPrimaryKey("id") + t.column("body") + } + + try db.execute(sql: "INSERT INTO document VALUES (1, 'Hello world!')") + + try db.create(virtualTable: "document_ft", using: FTS4()) { t in + t.synchronize(withTable: "document") + t.column("body") + } + + let stream = TestStream() + try db.dumpSchema(to: stream) + print(stream.output) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "document" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "body"); + CREATE TRIGGER "__document_ft_ai" AFTER INSERT ON "document" BEGIN + INSERT INTO "document_ft"("docid", "body") VALUES(new."id", new."body"); + END; + CREATE TRIGGER "__document_ft_au" AFTER UPDATE ON "document" BEGIN + INSERT INTO "document_ft"("docid", "body") VALUES(new."id", new."body"); + END; + CREATE TRIGGER "__document_ft_bd" BEFORE DELETE ON "document" BEGIN + DELETE FROM "document_ft" WHERE docid=old."id"; + END; + CREATE TRIGGER "__document_ft_bu" BEFORE UPDATE ON "document" BEGIN + DELETE FROM "document_ft" WHERE docid=old."id"; + END; + CREATE VIRTUAL TABLE "document_ft" USING fts4(body, content="document"); + + """) + } + } + + func test_dumpSchema_ignores_GRDB_internal_tables() throws { + let dbQueue = try makeDatabaseQueue() + var migrator = DatabaseMigrator() + migrator.registerMigration("v1") { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + } + } + try migrator.migrate(dbQueue) + + try dbQueue.read { db in + let stream = TestStream() + try db.dumpSchema(to: stream) + print(stream.output) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + """) + } + } + + // MARK: - Database content dump + + func test_dumpContent() throws { + try makeRugbyDatabase().read { db in + do { + // Default format + let stream = TestStream() + try db.dumpContent(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "teamId" TEXT REFERENCES "team"("id"), "name" TEXT NOT NULL); + CREATE INDEX "player_on_teamId" ON "player"("teamId"); + CREATE TABLE "team" ("id" TEXT PRIMARY KEY NOT NULL, "name" TEXT NOT NULL, "color" TEXT NOT NULL); + + player + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + team + ENG|England Rugby|white + FRA|XV de France|blue + + """) + } + do { + // Custom format + let stream = TestStream() + try db.dumpContent(format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "teamId" TEXT REFERENCES "team"("id"), "name" TEXT NOT NULL); + CREATE INDEX "player_on_teamId" ON "player"("teamId"); + CREATE TABLE "team" ("id" TEXT PRIMARY KEY NOT NULL, "name" TEXT NOT NULL, "color" TEXT NOT NULL); + + player + 
[{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + team + [{"id":"ENG","name":"England Rugby","color":"white"}, + {"id":"FRA","name":"XV de France","color":"blue"}] + + """) + } + } + } + + func test_dumpContent_empty_database() throws { + try makeDatabaseQueue().read { db in + let stream = TestStream() + try db.dumpContent(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + + """) + } + } + + func test_dumpContent_empty_tables() throws { + try makeDatabaseQueue().write { db in + try db.execute(literal: """ + CREATE TABLE blue(name); + CREATE TABLE red(name); + CREATE TABLE yellow(name); + INSERT INTO red VALUES ('vermillon') + """) + let stream = TestStream() + try db.dumpContent(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE blue(name); + CREATE TABLE red(name); + CREATE TABLE yellow(name); + + blue + + red + vermillon + + yellow + + """) + } + } + + func test_dumpContent_sqlite_master_ordering() throws { + try makeDatabaseQueue().write { db in + try db.execute(literal: """ + CREATE TABLE blue(name); + CREATE TABLE RED(name); + CREATE TABLE yellow(name); + CREATE INDEX index_blue1 ON blue(name); + CREATE INDEX INDEX_blue2 ON blue(name); + CREATE INDEX indexRed1 ON RED(name); + CREATE INDEX INDEXRed2 ON RED(name); + CREATE VIEW colors1 AS SELECT name FROM blue; + CREATE VIEW COLORS2 AS SELECT name FROM blue UNION SELECT name FROM yellow; + CREATE TRIGGER update_blue UPDATE OF name ON blue + BEGIN + DELETE FROM RED; + END; + CREATE TRIGGER update_RED UPDATE OF name ON RED + BEGIN + DELETE FROM yellow; + END; + """) + let stream = TestStream() + try db.dumpContent(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE blue(name); + CREATE INDEX index_blue1 ON blue(name); + CREATE INDEX INDEX_blue2 ON blue(name); + CREATE TRIGGER update_blue UPDATE OF name ON blue + BEGIN + DELETE FROM RED; + END; + CREATE VIEW colors1 AS SELECT name FROM blue; + CREATE VIEW COLORS2 AS SELECT name FROM blue UNION SELECT name FROM yellow; + CREATE TABLE RED(name); + CREATE INDEX indexRed1 ON RED(name); + CREATE INDEX INDEXRed2 ON RED(name); + CREATE TRIGGER update_RED UPDATE OF name ON RED + BEGIN + DELETE FROM yellow; + END; + CREATE TABLE yellow(name); + + blue + + RED + + yellow + + """) + } + } + + func test_dumpContent_ignores_shadow_tables() throws { + guard sqlite3_libversion_number() >= 3037000 else { + throw XCTSkip("Can't detect shadow tables") + } + + try makeDatabaseQueue().write { db in + try db.create(table: "document") { t in + t.autoIncrementedPrimaryKey("id") + t.column("body") + } + + try db.execute(sql: "INSERT INTO document VALUES (1, 'Hello world!')") + + try db.create(virtualTable: "document_ft", using: FTS4()) { t in + t.synchronize(withTable: "document") + t.column("body") + } + + let stream = TestStream() + try db.dumpContent(to: stream) + print(stream.output) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "document" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "body"); + CREATE TRIGGER "__document_ft_ai" AFTER INSERT ON "document" BEGIN + INSERT INTO "document_ft"("docid", "body") VALUES(new."id", new."body"); + END; + CREATE TRIGGER "__document_ft_au" AFTER UPDATE ON "document" BEGIN + INSERT INTO "document_ft"("docid", "body") VALUES(new."id", new."body"); + END; + CREATE TRIGGER "__document_ft_bd" BEFORE DELETE ON "document" BEGIN + DELETE FROM "document_ft" WHERE docid=old."id"; + END; + CREATE 
TRIGGER "__document_ft_bu" BEFORE UPDATE ON "document" BEGIN + DELETE FROM "document_ft" WHERE docid=old."id"; + END; + CREATE VIRTUAL TABLE "document_ft" USING fts4(body, content="document"); + + document + 1|Hello world! + + document_ft + Hello world! + + """) + } + } + + func test_dumpContent_ignores_GRDB_internal_tables() throws { + let dbQueue = try makeDatabaseQueue() + var migrator = DatabaseMigrator() + migrator.registerMigration("v1") { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + } + } + try migrator.migrate(dbQueue) + + try dbQueue.read { db in + let stream = TestStream() + try db.dumpContent(to: stream) + print(stream.output) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + player + + """) + } + } + + // MARK: - Support Databases + + private func makeValuesDatabase() throws -> DatabaseQueue { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.execute(literal: """ + CREATE TABLE value(name, value); + + INSERT INTO value VALUES ('blob: ascii long', CAST('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. Vestibulum sit amet mauris vitae urna non luctus.' AS BLOB)); + INSERT INTO value VALUES ('blob: ascii short', CAST('Hello' AS BLOB)); + INSERT INTO value VALUES ('blob: ascii tab', CAST('['||char(9)||']' AS BLOB)); + INSERT INTO value VALUES ('blob: ascii line feed', CAST('['||char(10)||']' AS BLOB)); + INSERT INTO value VALUES ('blob: ascii apostrophe', CAST('['']' AS BLOB)); + INSERT INTO value VALUES ('blob: ascii double quote', CAST('["]' AS BLOB)); + INSERT INTO value VALUES ('blob: binary short', X'80'); + INSERT INTO value VALUES ('blob: empty', x''); + INSERT INTO value VALUES ('blob: utf8 short', CAST('您好🙂' AS BLOB)); + INSERT INTO value VALUES ('blob: uuid', x'69BF8A9CD9F04777BD1193451D84CBCF'); + + INSERT INTO value VALUES ('double: -1.0', -1.0); + INSERT INTO value VALUES ('double: -inf', \(-1.0 / 0)); + INSERT INTO value VALUES ('double: 0.0', 0.0); + INSERT INTO value VALUES ('double: 123.45', 123.45); + INSERT INTO value VALUES ('double: inf', \(1.0 / 0)); + INSERT INTO value VALUES ('double: nan', \(0.0 / 0)); + + INSERT INTO value VALUES ('integer: 0', 0); + INSERT INTO value VALUES ('integer: -1', -1); + INSERT INTO value VALUES ('integer: 123', 123); + INSERT INTO value VALUES ('integer: max', 9223372036854775807); + INSERT INTO value VALUES ('integer: min', -9223372036854775808); + + INSERT INTO value VALUES ('null', NULL); + + INSERT INTO value VALUES ('text: empty', ''); + INSERT INTO value VALUES ('text: ascii apostrophe', '['']'); + INSERT INTO value VALUES ('text: ascii backslash', '[\\]'); + INSERT INTO value VALUES ('text: ascii double quote', '["]'); + INSERT INTO value VALUES ('text: ascii long', 'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi tristique tempor condimentum. Pellentesque pharetra lacus non ante sollicitudin auctor. 
Vestibulum sit amet mauris vitae urna non luctus.'); + INSERT INTO value VALUES ('text: ascii line feed', '['||char(10)||']'); + INSERT INTO value VALUES ('text: ascii short', 'Hello'); + INSERT INTO value VALUES ('text: ascii slash', '[/]'); + INSERT INTO value VALUES ('text: ascii tab', '['||char(9)||']'); + INSERT INTO value VALUES ('text: ascii url', 'https://github.com/groue/GRDB.swift'); + INSERT INTO value VALUES ('text: utf8 short', '您好🙂'); + """) + } + return dbQueue + } + + private func makeRugbyDatabase() throws -> DatabaseQueue { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "team") { t in + t.primaryKey("id", .text) + t.column("name", .text).notNull() + t.column("color", .text).notNull() + } + + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.belongsTo("team") + t.column("name", .text).notNull() + } + + let england = Team(id: "ENG", name: "England Rugby", color: "white") + let france = Team(id: "FRA", name: "XV de France", color: "blue") + + try england.insert(db) + try france.insert(db) + + _ = try Player(name: "Antoine Dupond", teamId: france.id).inserted(db) + _ = try Player(name: "Owen Farrell", teamId: england.id).inserted(db) + _ = try Player(name: "Gwendal Roué", teamId: nil).inserted(db) + } + return dbQueue + } +} diff --git a/Tests/GRDBTests/DatabaseErrorTests.swift b/Tests/GRDBTests/DatabaseErrorTests.swift index fe7b424488..8bba886ff5 100644 --- a/Tests/GRDBTests/DatabaseErrorTests.swift +++ b/Tests/GRDBTests/DatabaseErrorTests.swift @@ -205,7 +205,7 @@ class DatabaseErrorTests: GRDBTestCase { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in try db.create(table: "parents") { $0.column("id", .integer).primaryKey() } - try db.create(table: "children") { $0.column("parentId", .integer).references("parents") } + try db.create(table: "children") { $0.belongsTo("parent") } do { try db.execute(sql: "INSERT INTO children (parentId) VALUES (1)") } catch let error as DatabaseError { @@ -219,7 +219,7 @@ class DatabaseErrorTests: GRDBTestCase { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in try db.create(table: "parents") { $0.column("id", .integer).primaryKey() } - try db.create(table: "children") { $0.column("parentId", .integer).references("parents") } + try db.create(table: "children") { $0.belongsTo("parent") } do { try db.execute(sql: "INSERT INTO children (parentId) VALUES (1)") } catch let error as NSError { diff --git a/Tests/GRDBTests/DatabaseMigratorTests.swift b/Tests/GRDBTests/DatabaseMigratorTests.swift index 144686272e..e9a65cba43 100644 --- a/Tests/GRDBTests/DatabaseMigratorTests.swift +++ b/Tests/GRDBTests/DatabaseMigratorTests.swift @@ -35,7 +35,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testEmptyMigratorPublisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -147,7 +147,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testNonEmptyMigratorPublisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -203,7 +203,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testEmptyMigratorPublisherIsAsynchronous() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, 
watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -229,7 +229,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testNonEmptyMigratorPublisherIsAsynchronous() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -256,7 +256,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testMigratorPublisherDefaultScheduler() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -285,7 +285,7 @@ class DatabaseMigratorTests : GRDBTestCase { } func testMigratorPublisherCustomScheduler() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -815,6 +815,31 @@ class DatabaseMigratorTests : GRDBTestCase { try XCTAssertTrue(dbQueue.read { try $0.tableExists("t2") }) } + // Regression test for + func testEraseDatabaseOnSchemaChangeIgnoresInternalSchemaObjects() throws { + // Given a migrator with eraseDatabaseOnSchemaChange + var migrator = DatabaseMigrator() + migrator.eraseDatabaseOnSchemaChange = true + migrator.registerMigration("1") { db in + try db.execute(sql: "CREATE TABLE t(id INTEGER PRIMARY KEY)") + } + let dbQueue = try makeDatabaseQueue() + try migrator.migrate(dbQueue) + + // When we add an internal schema object (sqlite_stat1) + try dbQueue.write { db in + try db.execute(sql: """ + INSERT INTO t DEFAULT VALUES; + ANALYZE; + """) + try XCTAssertTrue(db.tableExists("sqlite_stat1")) + } + + // Then 2nd migration does not erase database + try migrator.migrate(dbQueue) + try XCTAssertEqual(dbQueue.read { try Int.fetchOne($0, sql: "SELECT id FROM t") }, 1) + } + func testEraseDatabaseOnSchemaChangeWithRenamedMigration() throws { let dbQueue = try makeDatabaseQueue() diff --git a/Tests/GRDBTests/DatabasePoolReleaseMemoryTests.swift b/Tests/GRDBTests/DatabasePoolReleaseMemoryTests.swift index 6e338372a1..9b6bb306c4 100644 --- a/Tests/GRDBTests/DatabasePoolReleaseMemoryTests.swift +++ b/Tests/GRDBTests/DatabasePoolReleaseMemoryTests.swift @@ -120,83 +120,129 @@ class DatabasePoolReleaseMemoryTests: GRDBTestCase { #endif - // TODO: fix flaky test -// func testDatabasePoolReleaseMemoryClosesReaderConnections() throws { -// let countQueue = DispatchQueue(label: "GRDB") -// var openConnectionCount = 0 -// var totalOpenConnectionCount = 0 -// -// dbConfiguration.SQLiteConnectionDidOpen = { -// countQueue.sync { -// totalOpenConnectionCount += 1 -// openConnectionCount += 1 -// } -// } -// -// dbConfiguration.SQLiteConnectionDidClose = { -// countQueue.sync { -// openConnectionCount -= 1 -// } -// } -// -// let dbPool = try makeDatabasePool() -// try dbPool.write { db in -// try db.execute(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY)") -// for _ in 0..<2 { -// try db.execute(sql: "INSERT INTO items (id) VALUES (NULL)") -// } -// } -// -// // Block 1 Block 2 Block3 -// // SELECT * FROM items -// // step -// // > -// let s1 = DispatchSemaphore(value: 0) -// // SELECT * FROM items -// // step -// // > -// let s2 = DispatchSemaphore(value: 0) -// // step step -// // > -// let s3 = DispatchSemaphore(value: 0) -// // end end releaseMemory -// -// let block1 = { () in -// try! 
dbPool.read { db in -// let cursor = try Row.fetchCursor(db, sql: "SELECT * FROM items") -// XCTAssertTrue(try cursor.next() != nil) -// s1.signal() -// _ = s2.wait(timeout: .distantFuture) -// XCTAssertTrue(try cursor.next() != nil) -// s3.signal() -// XCTAssertTrue(try cursor.next() == nil) -// } -// } -// let block2 = { () in -// _ = s1.wait(timeout: .distantFuture) -// try! dbPool.read { db in -// let cursor = try Row.fetchCursor(db, sql: "SELECT * FROM items") -// XCTAssertTrue(try cursor.next() != nil) -// s2.signal() -// XCTAssertTrue(try cursor.next() != nil) -// XCTAssertTrue(try cursor.next() == nil) -// } -// } -// let block3 = { () in -// _ = s3.wait(timeout: .distantFuture) -// dbPool.releaseMemory() -// } -// let blocks = [block1, block2, block3] -// DispatchQueue.concurrentPerform(iterations: blocks.count) { index in // FIXME: this crashes sometimes -// blocks[index]() -// } -// -// // Two readers, one writer -// XCTAssertEqual(totalOpenConnectionCount, 3) -// -// // Writer is still open -// XCTAssertEqual(openConnectionCount, 1) -// } + func test_DatabasePool_releaseMemory_closes_reader_connections() throws { + // A complicated test setup that opens multiple reader connections. + let countQueue = DispatchQueue(label: "GRDB") + var openConnectionCount = 0 + var totalOpenConnectionCount = 0 + + dbConfiguration.SQLiteConnectionDidOpen = { + countQueue.sync { + totalOpenConnectionCount += 1 + openConnectionCount += 1 + } + } + + dbConfiguration.SQLiteConnectionDidClose = { + countQueue.sync { + openConnectionCount -= 1 + } + } + + let dbPool = try makeDatabasePool() + try dbPool.write { db in + try db.execute(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY)") + for _ in 0..<2 { + try db.execute(sql: "INSERT INTO items (id) VALUES (NULL)") + } + } + + // Block 1 Block 2 Block3 + // SELECT * FROM items + // step + // > + let s1 = DispatchSemaphore(value: 0) + // SELECT * FROM items + // step + // > + let s2 = DispatchSemaphore(value: 0) + // step step + // > + let s3 = DispatchSemaphore(value: 0) + // end end releaseMemory + + let block1 = { () in + try! dbPool.read { db in + let cursor = try Row.fetchCursor(db, sql: "SELECT * FROM items") + XCTAssertTrue(try cursor.next() != nil) + s1.signal() + _ = s2.wait(timeout: .distantFuture) + XCTAssertTrue(try cursor.next() != nil) + s3.signal() + XCTAssertTrue(try cursor.next() == nil) + } + } + let block2 = { () in + _ = s1.wait(timeout: .distantFuture) + try! 
dbPool.read { db in + let cursor = try Row.fetchCursor(db, sql: "SELECT * FROM items") + XCTAssertTrue(try cursor.next() != nil) + s2.signal() + XCTAssertTrue(try cursor.next() != nil) + XCTAssertTrue(try cursor.next() == nil) + } + } + let block3 = { () in + _ = s3.wait(timeout: .distantFuture) + dbPool.releaseMemory() + } + let blocks = [block1, block2, block3] + DispatchQueue.concurrentPerform(iterations: blocks.count) { index in // FIXME: this crashes sometimes + blocks[index]() + } + + // Two readers, one writer + XCTAssertEqual(totalOpenConnectionCount, 3) + + // Writer is still open + XCTAssertEqual(openConnectionCount, 1) + } + + func test_DatabasePool_releaseMemory_closes_reader_connections_when_persistentReadOnlyConnections_is_false() throws { + var persistentConnectionCount = 0 + + dbConfiguration.SQLiteConnectionDidOpen = { + persistentConnectionCount += 1 + } + + dbConfiguration.SQLiteConnectionDidClose = { + persistentConnectionCount -= 1 + } + + dbConfiguration.persistentReadOnlyConnections = false + + let dbPool = try makeDatabasePool() + XCTAssertEqual(persistentConnectionCount, 1) // writer + + try dbPool.read { _ in } + XCTAssertEqual(persistentConnectionCount, 2) // writer + reader + + dbPool.releaseMemory() + XCTAssertEqual(persistentConnectionCount, 1) // writer + } + + func test_DatabasePool_releaseMemory_does_not_close_reader_connections_when_persistentReadOnlyConnections_is_true() throws { + var persistentConnectionCount = 0 + + dbConfiguration.SQLiteConnectionDidOpen = { + persistentConnectionCount += 1 + } + + dbConfiguration.SQLiteConnectionDidClose = { + persistentConnectionCount -= 1 + } + + dbConfiguration.persistentReadOnlyConnections = true + + let dbPool = try makeDatabasePool() + XCTAssertEqual(persistentConnectionCount, 1) // writer + + try dbPool.read { _ in } + XCTAssertEqual(persistentConnectionCount, 2) // writer + reader + + dbPool.releaseMemory() + XCTAssertEqual(persistentConnectionCount, 2) // writer + reader + } func testBlocksRetainConnection() throws { let countQueue = DispatchQueue(label: "GRDB") @@ -238,7 +284,7 @@ class DatabasePoolReleaseMemoryTests: GRDBTestCase { s2.signal() } let block2 = { [weak dbPool] () in - if let dbPool = dbPool { + if let dbPool { try! dbPool.read { db in s1.signal() _ = s2.wait(timeout: .distantFuture) @@ -282,7 +328,7 @@ class DatabasePoolReleaseMemoryTests: GRDBTestCase { let block2 = { [weak dbPool] () in var statement: Statement? 
= nil do { - if let dbPool = dbPool { + if let dbPool { do { try dbPool.write { db in statement = try db.makeStatement(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY)") diff --git a/Tests/GRDBTests/DatabasePoolTests.swift b/Tests/GRDBTests/DatabasePoolTests.swift index 22813d232a..7580b1a347 100644 --- a/Tests/GRDBTests/DatabasePoolTests.swift +++ b/Tests/GRDBTests/DatabasePoolTests.swift @@ -2,12 +2,145 @@ import XCTest import GRDB class DatabasePoolTests: GRDBTestCase { + func testJournalModeConfiguration() throws { + do { + // Factory default + let config = Configuration() + let dbPool = try makeDatabasePool(filename: "factory", configuration: config) + let journalMode = try dbPool.read { db in + try String.fetchOne(db, sql: "PRAGMA journal_mode") + } + XCTAssertEqual(journalMode, "wal") + } + do { + // Explicit default + var config = Configuration() + config.journalMode = .default + let dbPool = try makeDatabasePool(filename: "default", configuration: config) + let journalMode = try dbPool.read { db in + try String.fetchOne(db, sql: "PRAGMA journal_mode") + } + XCTAssertEqual(journalMode, "wal") + } + do { + // Explicit wal + var config = Configuration() + config.journalMode = .wal + let dbPool = try makeDatabasePool(filename: "wal", configuration: config) + let journalMode = try dbPool.read { db in + try String.fetchOne(db, sql: "PRAGMA journal_mode") + } + XCTAssertEqual(journalMode, "wal") + } + } + func testDatabasePoolCreatesWalShm() throws { - let dbPool = try makeDatabasePool() - withExtendedLifetime(dbPool) { + let dbPool = try makeDatabasePool(filename: "test") + try withExtendedLifetime(dbPool) { + let fm = FileManager() + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-wal")) + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-shm")) + +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + // A non-empty wal file makes sure ValueObservation can use wal snapshots. + // See + let walURL = URL(fileURLWithPath: dbPool.path + "-wal") + let walSize = try walURL.resourceValues(forKeys: [.fileSizeKey]).fileSize! + XCTAssertGreaterThan(walSize, 0) +#endif + } + } + + func testDatabasePoolCreatesWalShmFromNonWalDatabase() throws { + do { + let dbQueue = try makeDatabaseQueue(filename: "test") + try dbQueue.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE t(a)") + } + } + do { + let dbPool = try makeDatabasePool(filename: "test") + try withExtendedLifetime(dbPool) { + let fm = FileManager() + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-wal")) + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-shm")) + +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + // A non-empty wal file makes sure ValueObservation can use wal snapshots. + // See + let walURL = URL(fileURLWithPath: dbPool.path + "-wal") + let walSize = try walURL.resourceValues(forKeys: [.fileSizeKey]).fileSize! 
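+ // If the wal file were empty, the wal snapshot optimization mentioned above would not be exercised.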
+ XCTAssertGreaterThan(walSize, 0) +#endif + } + } + } + + func testDatabasePoolCreatesWalShmFromTruncatedWalFile() throws { + do { + let dbPool = try makeDatabasePool(filename: "test") + try dbPool.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE t(a)") + try db.checkpoint(.truncate) + } + } + do { + let dbPool = try makeDatabasePool(filename: "test") + try withExtendedLifetime(dbPool) { + let fm = FileManager() + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-wal")) + XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-shm")) + +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + // A non-empty wal file makes sure ValueObservation can use wal snapshots. + // See + let walURL = URL(fileURLWithPath: dbPool.path + "-wal") + let walSize = try walURL.resourceValues(forKeys: [.fileSizeKey]).fileSize! + XCTAssertGreaterThan(walSize, 0) +#endif + } + } + } + + func testDatabasePoolCreatesWalShmFromIssue1383() throws { + let url = testBundle.url(forResource: "Issue1383", withExtension: "sqlite")! + // Delete files created by previous test runs + try? FileManager.default.removeItem(at: url.deletingLastPathComponent().appendingPathComponent("Issue1383.sqlite-wal")) + try? FileManager.default.removeItem(at: url.deletingLastPathComponent().appendingPathComponent("Issue1383.sqlite-shm")) + + let dbPool = try DatabasePool(path: url.path) + try withExtendedLifetime(dbPool) { let fm = FileManager() XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-wal")) XCTAssertTrue(fm.fileExists(atPath: dbPool.path + "-shm")) + +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + // A non-empty wal file makes sure ValueObservation can use wal snapshots. + // See + let walURL = URL(fileURLWithPath: dbPool.path + "-wal") + let walSize = try walURL.resourceValues(forKeys: [.fileSizeKey]).fileSize! 
+ XCTAssertGreaterThan(walSize, 0) +#endif + } + } + + func testCanReadFromNewInstance() throws { + let dbPool = try makeDatabasePool() + try dbPool.read { _ in } + } + + func testCanReadFromTruncatedWalFile() throws { + do { + let dbPool = try makeDatabasePool(filename: "test") + try dbPool.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE t(a)") + try db.checkpoint(.truncate) + } + } + do { + let dbPool = try makeDatabasePool(filename: "test") + let count = try dbPool.read(Table("t").fetchCount) + XCTAssertEqual(count, 0) } } diff --git a/Tests/GRDBTests/DatabaseQueueConcurrencyTests.swift b/Tests/GRDBTests/DatabaseQueueConcurrencyTests.swift index 0fcbd6329e..613d6b5ad8 100644 --- a/Tests/GRDBTests/DatabaseQueueConcurrencyTests.swift +++ b/Tests/GRDBTests/DatabaseQueueConcurrencyTests.swift @@ -103,7 +103,7 @@ class ConcurrencyTests: GRDBTestCase { _ = group.wait(timeout: .distantFuture) - if let concurrencyError = concurrencyError { + if let concurrencyError { XCTAssertEqual(concurrencyError.resultCode, .SQLITE_BUSY) XCTAssertEqual(concurrencyError.sql, "INSERT INTO stuffs (id) VALUES (NULL)") } else { @@ -159,7 +159,7 @@ class ConcurrencyTests: GRDBTestCase { _ = group.wait(timeout: .distantFuture) - if let concurrencyError = concurrencyError { + if let concurrencyError { XCTAssertEqual(concurrencyError.resultCode, .SQLITE_BUSY) XCTAssertEqual(concurrencyError.sql, "BEGIN EXCLUSIVE TRANSACTION") } else { @@ -215,7 +215,7 @@ class ConcurrencyTests: GRDBTestCase { _ = group.wait(timeout: .distantFuture) - if let concurrencyError = concurrencyError { + if let concurrencyError { XCTAssertEqual(concurrencyError.resultCode, .SQLITE_BUSY) XCTAssertEqual(concurrencyError.sql, "BEGIN IMMEDIATE TRANSACTION") } else { diff --git a/Tests/GRDBTests/DatabaseQueueInMemoryCopyTests.swift b/Tests/GRDBTests/DatabaseQueueInMemoryCopyTests.swift new file mode 100644 index 0000000000..5b070a4c4e --- /dev/null +++ b/Tests/GRDBTests/DatabaseQueueInMemoryCopyTests.swift @@ -0,0 +1,166 @@ +import XCTest +import GRDB + +private final class TestStream: TextOutputStream { + var output: String + + init() { + output = "" + } + + func write(_ string: String) { + output.append(string) + } +} + +final class DatabaseQueueInMemoryCopyTests: GRDBTestCase { + private func makeSourceDatabase() throws -> DatabaseQueue { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("name", .text) + t.column("score", .integer) + } + try db.execute(sql: "INSERT INTO player VALUES (NULL, 'Arthur', 500)") + try db.execute(sql: "INSERT INTO player VALUES (NULL, 'Barbara', 1000)") + } + return dbQueue + } + + func test_inMemoryCopy() throws { + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.inMemoryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + // Test that content was faithfully copied + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + """) + } + + func test_inMemoryCopy_write() throws { + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.inMemoryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + // The in-memory copy is writable (necessary for testing migrations) + try dbQueue.write { db in + try 
db.execute(sql: "INSERT INTO player VALUES (NULL, 'Craig', 200)") + } + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + 3,'Craig',200 + + """) + } + + func test_inMemoryCopy_readOnly() throws { + let source = try makeSourceDatabase() + var config = dbConfiguration! + config.readonly = true + let dbQueue = try DatabaseQueue.inMemoryCopy(fromPath: source.path, configuration: config) + + // Test that the copy is read-only + XCTAssertThrowsError(try dbQueue.write { try $0.execute(sql: "DROP TABLE player") }) { error in + guard let dbError = error as? DatabaseError else { + XCTFail("Expected DatabaseError") + return + } + XCTAssertEqual(dbError.message, "attempt to write a readonly database") + } + + // Test that the copy is still read-only after a read + try dbQueue.read { _ in } + XCTAssertThrowsError(try dbQueue.write { try $0.execute(sql: "DROP TABLE player") }) { error in + guard let dbError = error as? DatabaseError else { + XCTFail("Expected DatabaseError") + return + } + XCTAssertEqual(dbError.message, "attempt to write a readonly database") + } + + // Test that content was faithfully copied + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + """) + } + + func test_migrations_are_testable() throws { + // Given a migrator… + var migrator = DatabaseMigrator() + migrator.registerMigration("v1") { try $0.create(table: "team") { $0.autoIncrementedPrimaryKey("id") } } + migrator.registerMigration("v2") { try $0.create(table: "match") { $0.autoIncrementedPrimaryKey("id") } } + migrator.registerMigration("v3") { try $0.drop(table: "match") } + + // …GRDB users can test the migrator on fixtures + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.inMemoryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + try migrator.migrate(dbQueue, upTo: "v2") + do { + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "match" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + CREATE TABLE "team" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + match + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + team + + """) + } + + try migrator.migrate(dbQueue, upTo: "v3") + do { + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + CREATE TABLE "team" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + team + + """) + } + } +} diff --git a/Tests/GRDBTests/DatabaseQueueReleaseMemoryTests.swift b/Tests/GRDBTests/DatabaseQueueReleaseMemoryTests.swift index 6868f8bb02..7c5b9428eb 100644 --- a/Tests/GRDBTests/DatabaseQueueReleaseMemoryTests.swift +++ b/Tests/GRDBTests/DatabaseQueueReleaseMemoryTests.swift @@ -73,7 +73,7 @@ class DatabaseQueueReleaseMemoryTests: GRDBTestCase { s2.signal() } let block2 = { [weak dbQueue] () in - if let 
dbQueue = dbQueue { + if let dbQueue { try! dbQueue.write { db in s1.signal() _ = s2.wait(timeout: .distantFuture) @@ -118,7 +118,7 @@ class DatabaseQueueReleaseMemoryTests: GRDBTestCase { let block2 = { [weak dbQueue] () in var statement: Statement? = nil do { - if let dbQueue = dbQueue { + if let dbQueue { do { try dbQueue.write { db in statement = try db.makeStatement(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY)") diff --git a/Tests/GRDBTests/DatabaseQueueSchemaCacheTests.swift b/Tests/GRDBTests/DatabaseQueueSchemaCacheTests.swift index e99891f530..3f54530f6c 100644 --- a/Tests/GRDBTests/DatabaseQueueSchemaCacheTests.swift +++ b/Tests/GRDBTests/DatabaseQueueSchemaCacheTests.swift @@ -150,7 +150,7 @@ class DatabaseQueueSchemaCacheTests : GRDBTestCase { } func testMainShadowedByAttachedDatabase() throws { - #if SQLITE_HAS_CODEC + #if GRDBCIPHER_USE_ENCRYPTION // Avoid error due to key not being provided: // file is not a database - while executing `ATTACH DATABASE...` throw XCTSkip("This test does not support encrypted databases") @@ -229,4 +229,171 @@ class DatabaseQueueSchemaCacheTests : GRDBTestCase { try XCTAssertFalse(db.tableExists("item")) } } + + func testTableExistsThrowsWhenUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + do { + _ = try db.tableExists("t", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + do { + _ = try db.viewExists("v", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + do { + _ = try db.triggerExists("tr", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testExistsWithSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + let tableExists = try db.tableExists("t", in: "main") + let viewExists = try db.viewExists("v", in: "main") + let triggerExists = try db.triggerExists("tr", in: "main") + XCTAssertTrue(tableExists) + XCTAssertTrue(viewExists) + XCTAssertTrue(triggerExists) + } + } + + func testNotExistsWithSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let tableExists = try db.tableExists("t", in: "main") + let viewExists = try db.viewExists("v", in: "main") + let triggerExists = try db.triggerExists("tr", in: "main") + XCTAssertFalse(tableExists) + XCTAssertFalse(viewExists) + XCTAssertFalse(triggerExists) + } + } + + func testExistsWithSpecifiedSchemaWithEntityNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while 
executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let tableExistsInMain = try db.tableExists("t", in: "main") + let viewExistsInMain = try db.viewExists("v", in: "main") + let triggerExistsInMain = try db.triggerExists("tr", in: "main") + XCTAssertTrue(tableExistsInMain) + XCTAssertTrue(viewExistsInMain) + XCTAssertTrue(triggerExistsInMain) + + let tableExistsInAttached = try db.tableExists("t", in: "attached") + let viewExistsInAttached = try db.viewExists("v", in: "attached") + let triggerExistsInAttached = try db.triggerExists("tr", in: "attached") + XCTAssertTrue(tableExistsInAttached) + XCTAssertTrue(viewExistsInAttached) + XCTAssertTrue(triggerExistsInAttached) + } + } + + func testExistsWithUnspecifiedSchemaWithEntityNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + // Some entity with the name exists, but we can't prove from this information which one + // it's getting. `true` is still the correct result. 
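+ // (Both the main and the attached schemas define t, v, and tr here.)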
+ let tableExists = try db.tableExists("t") + let viewExists = try db.viewExists("v") + let triggerExists = try db.triggerExists("tr") + XCTAssertTrue(tableExists) + XCTAssertTrue(viewExists) + XCTAssertTrue(triggerExists) + } + } + + func testExistsWithUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER); + CREATE VIEW v AS SELECT * FROM t; + CREATE TRIGGER tr AFTER INSERT ON t BEGIN SELECT 1; END; + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let tableExists = try db.tableExists("t") + let viewExists = try db.viewExists("v") + let triggerExists = try db.triggerExists("tr") + XCTAssertTrue(tableExists) + XCTAssertTrue(viewExists) + XCTAssertTrue(triggerExists) + } + } } diff --git a/Tests/GRDBTests/DatabaseQueueTemporaryCopyTests.swift b/Tests/GRDBTests/DatabaseQueueTemporaryCopyTests.swift new file mode 100644 index 0000000000..a6f6f53412 --- /dev/null +++ b/Tests/GRDBTests/DatabaseQueueTemporaryCopyTests.swift @@ -0,0 +1,166 @@ +import XCTest +import GRDB + +private final class TestStream: TextOutputStream { + var output: String + + init() { + output = "" + } + + func write(_ string: String) { + output.append(string) + } +} + +final class DatabaseQueueTemporaryCopyTests: GRDBTestCase { + private func makeSourceDatabase() throws -> DatabaseQueue { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("name", .text) + t.column("score", .integer) + } + try db.execute(sql: "INSERT INTO player VALUES (NULL, 'Arthur', 500)") + try db.execute(sql: "INSERT INTO player VALUES (NULL, 'Barbara', 1000)") + } + return dbQueue + } + + func test_temporaryCopy() throws { + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.temporaryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + // Test that content was faithfully copied + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + """) + } + + func test_temporaryCopy_write() throws { + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.temporaryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + // The temporary copy is writable (necessary for testing migrations) + try dbQueue.write { db in + try db.execute(sql: "INSERT INTO player VALUES (NULL, 'Craig', 200)") + } + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + 3,'Craig',200 + + """) + } + + func test_temporaryCopy_readOnly() throws { + let source = try makeSourceDatabase() + var config = dbConfiguration!
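+ // Start from the test case's configuration, then flip it to read-only.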
+ config.readonly = true + let dbQueue = try DatabaseQueue.temporaryCopy(fromPath: source.path, configuration: config) + + // Test that the copy is read-only + XCTAssertThrowsError(try dbQueue.write { try $0.execute(sql: "DROP TABLE player") }) { error in + guard let dbError = error as? DatabaseError else { + XCTFail("Expected DatabaseError") + return + } + XCTAssertEqual(dbError.message, "attempt to write a readonly database") + } + + // Test that the copy is still read-only after a read + try dbQueue.read { _ in } + XCTAssertThrowsError(try dbQueue.write { try $0.execute(sql: "DROP TABLE player") }) { error in + guard let dbError = error as? DatabaseError else { + XCTFail("Expected DatabaseError") + return + } + XCTAssertEqual(dbError.message, "attempt to write a readonly database") + } + + // Test that content was faithfully copied + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + """) + } + + func test_migrations_are_testable() throws { + // Given a migrator… + var migrator = DatabaseMigrator() + migrator.registerMigration("v1") { try $0.create(table: "team") { $0.autoIncrementedPrimaryKey("id") } } + migrator.registerMigration("v2") { try $0.create(table: "match") { $0.autoIncrementedPrimaryKey("id") } } + migrator.registerMigration("v3") { try $0.drop(table: "match") } + + // …GRDB users can test the migrator on fixtures + let source = try makeSourceDatabase() + let dbQueue = try DatabaseQueue.temporaryCopy( + fromPath: source.path, + configuration: dbConfiguration) + + try migrator.migrate(dbQueue, upTo: "v2") + do { + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "match" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + CREATE TABLE "team" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + match + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + team + + """) + } + + try migrator.migrate(dbQueue, upTo: "v3") + do { + let stream = TestStream() + try dbQueue.dumpContent(format: .quote(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "name" TEXT, "score" INTEGER); + CREATE TABLE "team" ("id" INTEGER PRIMARY KEY AUTOINCREMENT); + + player + 1,'Arthur',500 + 2,'Barbara',1000 + + team + + """) + } + } +} diff --git a/Tests/GRDBTests/DatabaseQueueTests.swift b/Tests/GRDBTests/DatabaseQueueTests.swift index 2c3f26d071..7bd5adf2b5 100644 --- a/Tests/GRDBTests/DatabaseQueueTests.swift +++ b/Tests/GRDBTests/DatabaseQueueTests.swift @@ -3,6 +3,37 @@ import Dispatch import GRDB class DatabaseQueueTests: GRDBTestCase { + func testJournalModeConfiguration() throws { + do { + // Factory default + let config = Configuration() + let dbQueue = try makeDatabaseQueue(filename: "factory", configuration: config) + let journalMode = try dbQueue.read { db in + try String.fetchOne(db, sql: "PRAGMA journal_mode") + } + XCTAssertEqual(journalMode, "delete") + } + do { + // Explicit default + var config = Configuration() + config.journalMode = .default + let dbQueue = try makeDatabaseQueue(filename: "default", configuration: config) + let journalMode = try dbQueue.read { db in + try String.fetchOne(db, sql: "PRAGMA 
journal_mode") + } + XCTAssertEqual(journalMode, "delete") + } + do { + // Explicit wal + var config = Configuration() + config.journalMode = .wal + let dbQueue = try makeDatabaseQueue(filename: "wal", configuration: config) + let journalMode = try dbQueue.read { db in + try String.fetchOne(db, sql: "PRAGMA journal_mode") + } + XCTAssertEqual(journalMode, "wal") + } + } func testInvalidFileFormat() throws { do { @@ -227,6 +258,140 @@ class DatabaseQueueTests: GRDBTestCase { try test(qos: .userInitiated) } + // MARK: - SQLITE_BUSY prevention + + // See + func test_busy_timeout_does_not_prevent_SQLITE_BUSY_when_write_lock_is_acquired_by_other_connection() throws { + var configuration = dbConfiguration! + configuration.journalMode = .wal + configuration.busyMode = .timeout(1) // Does not help in this case + + let dbQueue1 = try makeDatabaseQueue(filename: "test", configuration: configuration) + let dbQueue2 = try makeDatabaseQueue(filename: "test", configuration: configuration) + + // DB1 DB2 + // BEGIN DEFERRED TRANSACTION + // READ + let s1 = DispatchSemaphore(value: 0) + // BEGIN DEFERRED TRANSACTION + // WRITE + let s2 = DispatchSemaphore(value: 0) + // WRITE -> SQLITE_BUSY because DB2 is already holding the write lock. + let s3 = DispatchSemaphore(value: 0) + // COMMIT + + let block1 = { + try! dbQueue1.inDatabase { db in + try db.inTransaction(.deferred) { + try db.execute(sql: "SELECT * FROM sqlite_master") + s1.signal() + s2.wait() + do { + try db.execute(sql: "CREATE TABLE test1(a)") + XCTFail("Expected error") + } catch DatabaseError.SQLITE_BUSY { + // Test success + } + s3.signal() + return .commit + } + } + } + + let block2 = { + try! dbQueue2.inDatabase { db in + s1.wait() + try db.inTransaction(.deferred) { + try db.execute(sql: "CREATE TABLE test2(a)") + s2.signal() + s3.wait() + return .commit + } + } + } + + let blocks = [block1, block2] + DispatchQueue.concurrentPerform(iterations: blocks.count) { index in + blocks[index]() + } + } + + // See + func test_busy_timeout_does_not_prevent_SQLITE_BUSY_when_write_lock_was_acquired_by_other_connection() throws { + var configuration = dbConfiguration! + configuration.journalMode = .wal + configuration.busyMode = .timeout(1) // Does not help in this case + + let dbQueue1 = try makeDatabaseQueue(filename: "test", configuration: configuration) + let dbQueue2 = try makeDatabaseQueue(filename: "test", configuration: configuration) + + // DB1 DB2 + // BEGIN DEFERRED TRANSACTION + // READ + let s1 = DispatchSemaphore(value: 0) + // WRITE + let s2 = DispatchSemaphore(value: 0) + // WRITE -> SQLITE_BUSY because write lock can't be acquired, + // even though DB2 does no longer hold any lock. + + let block1 = { + try! dbQueue1.inDatabase { db in + try db.inTransaction(.deferred) { + try db.execute(sql: "SELECT * FROM sqlite_master") + s1.signal() + s2.wait() + do { + try db.execute(sql: "CREATE TABLE test1(a)") + XCTFail("Expected error") + } catch DatabaseError.SQLITE_BUSY { + // Test success + } + return .commit + } + } + } + + let block2 = { + try! dbQueue2.inDatabase { db in + s1.wait() + try db.execute(sql: "CREATE TABLE test2(a)") + s2.signal() + } + } + + let blocks = [block1, block2] + DispatchQueue.concurrentPerform(iterations: blocks.count) { index in + blocks[index]() + } + } + + // See + func test_busy_timeout_and_IMMEDIATE_transactions_do_prevent_SQLITE_BUSY() throws { + var configuration = dbConfiguration! 
+ // Test fails when this line is commented + configuration.defaultTransactionKind = .immediate + // Test fails when this line is commented + configuration.busyMode = .timeout(10) + + let dbQueue = try makeDatabaseQueue(filename: "test") + try dbQueue.inDatabase { db in + try db.execute(sql: "PRAGMA journal_mode = wal") + try db.execute(sql: "CREATE TABLE test(a)") + } + + let parallelWritesCount = 50 + DispatchQueue.concurrentPerform(iterations: parallelWritesCount) { index in + let dbQueue = try! makeDatabaseQueue(filename: "test", configuration: configuration) + try! dbQueue.write { db in + _ = try Table("test").fetchCount(db) + try db.execute(sql: "INSERT INTO test VALUES (1)") + } + } + + let count = try dbQueue.read(Table("test").fetchCount) + XCTAssertEqual(count, parallelWritesCount) + } + // MARK: - Closing func testClose() throws { diff --git a/Tests/GRDBTests/DatabaseReaderDumpTests.swift b/Tests/GRDBTests/DatabaseReaderDumpTests.swift new file mode 100644 index 0000000000..303bfb81d4 --- /dev/null +++ b/Tests/GRDBTests/DatabaseReaderDumpTests.swift @@ -0,0 +1,212 @@ +import XCTest +import GRDB + +private final class TestStream: TextOutputStream { + var output: String + + init() { + output = "" + } + + func write(_ string: String) { + output.append(string) + } +} + +private struct Player: Codable, MutablePersistableRecord { + var id: Int64? + var name: String + var teamId: String? + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } +} + +private struct Team: Codable, PersistableRecord { + var id: String + var name: String + var color: String +} + +final class DatabaseReaderDumpTests: GRDBTestCase { + func test_dumpSQL() throws { + do { + // Default format + let stream = TestStream() + try makeDatabaseQueue().dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + to: stream) + XCTAssertEqual(stream.output, """ + 1|foo + 2|bar + bar + foo + + """) + } + do { + // Custom format + let stream = TestStream() + try makeDatabaseQueue().dumpSQL( + """ + CREATE TABLE t(a, b); + INSERT INTO t VALUES (1, 'foo'); + INSERT INTO t VALUES (2, 'bar'); + SELECT * FROM t ORDER BY a; + SELECT b FROM t ORDER BY b; + SELECT NULL WHERE NULL; + """, + format: .json(), + to: stream) + XCTAssertEqual(stream.output, """ + [{"a":1,"b":"foo"}, + {"a":2,"b":"bar"}] + [{"b":"bar"}, + {"b":"foo"}] + [] + + """) + } + } + + func test_dumpRequest() throws { + do { + // Default format + let stream = TestStream() + try makeRugbyDatabase().dumpRequest(Player.orderByPrimaryKey(), to: stream) + XCTAssertEqual(stream.output, """ + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + """) + } + do { + // Custom format + let stream = TestStream() + try makeRugbyDatabase().dumpRequest(Player.orderByPrimaryKey(), format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + """) + } + } + + func test_dumpTables() throws { + do { + // Default format + let stream = TestStream() + try makeRugbyDatabase().dumpTables(["player", "team"], to: stream) + XCTAssertEqual(stream.output, """ + player + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + team + ENG|England Rugby|white + FRA|XV de France|blue + + """) + } + do { + // Custom format + let stream = TestStream() + try 
makeRugbyDatabase().dumpTables(["team", "player"], format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + team + [{"id":"ENG","name":"England Rugby","color":"white"}, + {"id":"FRA","name":"XV de France","color":"blue"}] + + player + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + """) + } + } + + func test_dumpContent() throws { + do { + // Default format + let stream = TestStream() + try makeRugbyDatabase().dumpContent(to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "teamId" TEXT REFERENCES "team"("id"), "name" TEXT NOT NULL); + CREATE INDEX "player_on_teamId" ON "player"("teamId"); + CREATE TABLE "team" ("id" TEXT PRIMARY KEY NOT NULL, "name" TEXT NOT NULL, "color" TEXT NOT NULL); + + player + 1|FRA|Antoine Dupond + 2|ENG|Owen Farrell + 3||Gwendal Roué + + team + ENG|England Rugby|white + FRA|XV de France|blue + + """) + } + do { + // Custom format + let stream = TestStream() + try makeRugbyDatabase().dumpContent(format: .json(), to: stream) + XCTAssertEqual(stream.output, """ + sqlite_master + CREATE TABLE "player" ("id" INTEGER PRIMARY KEY AUTOINCREMENT, "teamId" TEXT REFERENCES "team"("id"), "name" TEXT NOT NULL); + CREATE INDEX "player_on_teamId" ON "player"("teamId"); + CREATE TABLE "team" ("id" TEXT PRIMARY KEY NOT NULL, "name" TEXT NOT NULL, "color" TEXT NOT NULL); + + player + [{"id":1,"teamId":"FRA","name":"Antoine Dupond"}, + {"id":2,"teamId":"ENG","name":"Owen Farrell"}, + {"id":3,"teamId":null,"name":"Gwendal Roué"}] + + team + [{"id":"ENG","name":"England Rugby","color":"white"}, + {"id":"FRA","name":"XV de France","color":"blue"}] + + """) + } + } + + private func makeRugbyDatabase() throws -> DatabaseQueue { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "team") { t in + t.primaryKey("id", .text) + t.column("name", .text).notNull() + t.column("color", .text).notNull() + } + + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.belongsTo("team") + t.column("name", .text).notNull() + } + + let england = Team(id: "ENG", name: "England Rugby", color: "white") + let france = Team(id: "FRA", name: "XV de France", color: "blue") + + try england.insert(db) + try france.insert(db) + + _ = try Player(name: "Antoine Dupond", teamId: france.id).inserted(db) + _ = try Player(name: "Owen Farrell", teamId: england.id).inserted(db) + _ = try Player(name: "Gwendal Roué", teamId: nil).inserted(db) + } + return dbQueue + } +} diff --git a/Tests/GRDBTests/DatabaseReaderTests.swift b/Tests/GRDBTests/DatabaseReaderTests.swift index fec7c02f57..df99cf9cc0 100644 --- a/Tests/GRDBTests/DatabaseReaderTests.swift +++ b/Tests/GRDBTests/DatabaseReaderTests.swift @@ -18,7 +18,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let count = try dbReader.read { db in try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t") } @@ -33,7 +33,7 @@ class DatabaseReaderTests : GRDBTestCase { #endif } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_ReadCanRead() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -41,7 +41,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: 
DatabaseReader) async throws { + func test(_ dbReader: some DatabaseReader) async throws { let count = try await dbReader.read { db in try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t") } @@ -57,7 +57,7 @@ class DatabaseReaderTests : GRDBTestCase { } func testReadPreventsDatabaseModification() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { do { try dbReader.read { db in try db.execute(sql: "CREATE TABLE t (id INTEGER PRIMARY KEY)") @@ -75,9 +75,9 @@ class DatabaseReaderTests : GRDBTestCase { #endif } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_ReadPreventsDatabaseModification() async throws { - func test(_ dbReader: DatabaseReader) async throws { + func test(_ dbReader: some DatabaseReader) async throws { do { try await dbReader.read { db in try db.execute(sql: "CREATE TABLE t (id INTEGER PRIMARY KEY)") @@ -104,7 +104,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let count = try dbReader.unsafeRead { db in try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t") } @@ -119,7 +119,7 @@ class DatabaseReaderTests : GRDBTestCase { #endif } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_UnsafeReadCanRead() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -127,7 +127,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: DatabaseReader) async throws { + func test(_ dbReader: some DatabaseReader) async throws { let count = try await dbReader.unsafeRead { db in try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t") } @@ -151,7 +151,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let count = try dbReader.unsafeReentrantRead { db in try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t") } @@ -167,7 +167,7 @@ class DatabaseReaderTests : GRDBTestCase { } func testUnsafeReentrantReadIsReentrant() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { try dbReader.unsafeReentrantRead { db1 in try dbReader.unsafeReentrantRead { db2 in try dbReader.unsafeReentrantRead { db3 in @@ -187,7 +187,7 @@ class DatabaseReaderTests : GRDBTestCase { } func testUnsafeReentrantReadIsReentrantFromWrite() throws { - func test(_ dbWriter: DatabaseWriter) throws { + func test(_ dbWriter: some DatabaseWriter) throws { try dbWriter.write { db1 in try dbWriter.unsafeReentrantRead { db2 in try dbWriter.unsafeReentrantRead { db3 in @@ -205,7 +205,7 @@ class DatabaseReaderTests : GRDBTestCase { // MARK: - AsyncRead func testAsyncRead() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let expectation = self.expectation(description: "updates") let semaphore = DispatchSemaphore(value: 0) var count: Int? 
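+ // The recurring change in this file replaces existential parameters
+ // (`_ dbReader: DatabaseReader`) with opaque ones (`some DatabaseReader`,
+ // Swift 5.7): each helper becomes generic over the concrete reader type
+ // instead of boxing it. A minimal sketch with a hypothetical helper,
+ // assuming the `t` table these tests create:
+ private func hypotheticalCount(in reader: some DatabaseReader) throws -> Int? {
+     // Statically dispatched for DatabaseQueue, DatabasePool, etc.;
+     // no existential container is involved.
+     try reader.read { db in
+         try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM t")
+     }
+ }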
@@ -234,7 +234,7 @@ class DatabaseReaderTests : GRDBTestCase { } func testAsyncReadPreventsDatabaseModification() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let expectation = self.expectation(description: "updates") let semaphore = DispatchSemaphore(value: 0) dbReader.asyncRead { dbResult in @@ -265,7 +265,7 @@ class DatabaseReaderTests : GRDBTestCase { // MARK: - Function func testAddFunction() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let value = try dbReader.read { db -> Int? in let f = DatabaseFunction("f", argumentCount: 0, pure: true) { _ in 0 } db.add(function: f) @@ -285,7 +285,7 @@ class DatabaseReaderTests : GRDBTestCase { // MARK: - Collation func testAddCollation() throws { - func test(_ dbReader: DatabaseReader) throws { + func test(_ dbReader: some DatabaseReader) throws { let value = try dbReader.read { db -> Int? in let collation = DatabaseCollation("c") { _, _ in .orderedSame } db.add(collation: collation) @@ -311,7 +311,7 @@ class DatabaseReaderTests : GRDBTestCase { } return dbWriter } - func test(_ source: DatabaseReader) throws { + func test(_ source: some DatabaseReader) throws { let dest = try makeDatabaseQueue(configuration: Configuration()) try source.backup(to: dest) let count = try dest.read { db in diff --git a/Tests/GRDBTests/DatabaseRegionObservationTests.swift b/Tests/GRDBTests/DatabaseRegionObservationTests.swift index bec4f9cf9f..dd4218a00a 100644 --- a/Tests/GRDBTests/DatabaseRegionObservationTests.swift +++ b/Tests/GRDBTests/DatabaseRegionObservationTests.swift @@ -224,6 +224,42 @@ class DatabaseRegionObservationTests: GRDBTestCase { XCTAssertEqual(count, 1) } } + + func test_DatabaseRegionObservation_is_triggered_by_explicit_change_notification() throws { + let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") + try dbQueue1.write { db in + try db.execute(sql: "CREATE TABLE test(a)") + } + + let undetectedExpectation = expectation(description: "undetected") + undetectedExpectation.isInverted = true + + let detectedExpectation = expectation(description: "detected") + + let observation = DatabaseRegionObservation(tracking: Table("test")) + let cancellable = observation.start( + in: dbQueue1, + onError: { error in XCTFail("Unexpected error: \(error)") }, + onChange: { _ in + undetectedExpectation.fulfill() + detectedExpectation.fulfill() + }) + + try withExtendedLifetime(cancellable) { + // Change performed from external connection is not detected... + let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite") + try dbQueue2.write { db in + try db.execute(sql: "INSERT INTO test (a) VALUES (1)") + } + wait(for: [undetectedExpectation], timeout: 2) + + // ... until we perform an explicit change notification + try dbQueue1.write { db in + try db.notifyChanges(in: Table("test")) + } + wait(for: [detectedExpectation], timeout: 2) + } + } // Regression test for https://github.com/groue/GRDB.swift/issues/514 // TODO: uncomment and make this test pass. 
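+ // The explicit-notification test above exercises
+ // `Database.notifyChanges(in:)`. SQLite change hooks are per-connection,
+ // so writes performed by another connection or process are invisible to
+ // observations; an application that knows such a write happened can poke
+ // its observers manually. A minimal sketch of the pattern, assuming a
+ // `player` table:
+ try dbQueue.write { db in
+     // Performs no actual write: it only tells GRDB observers
+     // (DatabaseRegionObservation, ValueObservation) to consider the
+     // region covering the `player` table as changed.
+     try db.notifyChanges(in: Table("player"))
+ }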
diff --git a/Tests/GRDBTests/DatabaseRegionTests.swift b/Tests/GRDBTests/DatabaseRegionTests.swift index 11a02dedde..7044aa3a88 100644 --- a/Tests/GRDBTests/DatabaseRegionTests.swift +++ b/Tests/GRDBTests/DatabaseRegionTests.swift @@ -543,6 +543,10 @@ class DatabaseRegionTests : GRDBTestCase { XCTAssertEqual(tableName, "foo") XCTAssertEqual(columnNames, Set(["bar", "baz"])) } + do { + let statement = try db.makeStatement(sql: "UPDATE foo SET bar = 'bar' WHERE baz = 'baz'") + XCTAssertEqual(statement.databaseRegion.description, "foo(baz)") + } } } diff --git a/Tests/GRDBTests/DatabaseSavepointTests.swift b/Tests/GRDBTests/DatabaseSavepointTests.swift index b1858773fe..1483d1a682 100644 --- a/Tests/GRDBTests/DatabaseSavepointTests.swift +++ b/Tests/GRDBTests/DatabaseSavepointTests.swift @@ -5,7 +5,7 @@ func insertItem(_ db: Database, name: String) throws { try db.execute(sql: "INSERT INTO items (name) VALUES (?)", arguments: [name]) } -func fetchAllItemNames(_ dbReader: DatabaseReader) throws -> [String] { +func fetchAllItemNames(_ dbReader: some DatabaseReader) throws -> [String] { try dbReader.read { db in try String.fetchAll(db, sql: "SELECT * FROM items ORDER BY name") } diff --git a/Tests/GRDBTests/DatabaseSnapshotPoolTests.swift b/Tests/GRDBTests/DatabaseSnapshotPoolTests.swift index eb66ff237b..6176865e8e 100644 --- a/Tests/GRDBTests/DatabaseSnapshotPoolTests.swift +++ b/Tests/GRDBTests/DatabaseSnapshotPoolTests.swift @@ -220,7 +220,7 @@ final class DatabaseSnapshotPoolTests: GRDBTestCase { try XCTAssertEqual(dbPool.read(counter.value), 2) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func test_read_async() async throws { let dbPool = try makeDatabasePool() let counter = try Counter(dbPool: dbPool) // 0 diff --git a/Tests/GRDBTests/DatabaseTests.swift b/Tests/GRDBTests/DatabaseTests.swift index 635b852c0b..145340ab3c 100644 --- a/Tests/GRDBTests/DatabaseTests.swift +++ b/Tests/GRDBTests/DatabaseTests.swift @@ -1,5 +1,5 @@ import XCTest -@testable import GRDB +import GRDB class DatabaseTests : GRDBTestCase { @@ -520,7 +520,6 @@ class DatabaseTests : GRDBTestCase { } } - // Test an internal API func testReadOnly() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in diff --git a/Tests/GRDBTests/DatabaseUUIDEncodingStrategyTests.swift b/Tests/GRDBTests/DatabaseUUIDEncodingStrategyTests.swift index d898df7e75..2245a8be6a 100644 --- a/Tests/GRDBTests/DatabaseUUIDEncodingStrategyTests.swift +++ b/Tests/GRDBTests/DatabaseUUIDEncodingStrategyTests.swift @@ -23,7 +23,7 @@ private struct RecordWithUUID: EncodableRecord, Enco var uuid: UUID } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension RecordWithUUID: Identifiable { var id: UUID { uuid } } @@ -33,7 +33,7 @@ private struct RecordWithOptionalUUID: EncodableReco var uuid: UUID? } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension RecordWithOptionalUUID: Identifiable { var id: UUID? 
{ uuid } } @@ -53,7 +53,11 @@ class DatabaseUUIDEncodingStrategyTests: GRDBTestCase { } } - private func test(strategy: Strategy.Type, encodesUUID uuid: UUID, as value: DatabaseValueConvertible) throws { + private func test( + strategy: Strategy.Type, + encodesUUID uuid: UUID, + as value: any DatabaseValueConvertible) + throws { try test(record: RecordWithUUID(uuid: uuid), expectedStorage: value.databaseValue.storage) try test(record: RecordWithOptionalUUID(uuid: uuid), expectedStorage: value.databaseValue.storage) } @@ -180,7 +184,7 @@ extension DatabaseUUIDEncodingStrategyTests { } func testFilterID() throws { - guard #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Identifiable not available") } @@ -299,7 +303,7 @@ extension DatabaseUUIDEncodingStrategyTests { } func testDeleteID() throws { - guard #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Identifiable not available") } diff --git a/Tests/GRDBTests/DatabaseValueConvertibleDecodableTests.swift b/Tests/GRDBTests/DatabaseValueConvertibleDecodableTests.swift index 53c98a282d..50d4952591 100644 --- a/Tests/GRDBTests/DatabaseValueConvertibleDecodableTests.swift +++ b/Tests/GRDBTests/DatabaseValueConvertibleDecodableTests.swift @@ -326,4 +326,57 @@ class DatabaseValueConvertibleDecodableTests: GRDBTestCase { XCTAssertEqual(value, .foo) } } + + func testDatabaseValueConvertibleImplementationDerivedFromDecodableWithCustomJsonDecoder() throws { + struct Value: Decodable, DatabaseValueConvertible { + let duration: Double + + var databaseValue: DatabaseValue { + preconditionFailure("not tested") + } + + public static func databaseJSONDecoder() -> JSONDecoder { + let decoder = JSONDecoder() + decoder.dataDecodingStrategy = .base64 + decoder.dateDecodingStrategy = .millisecondsSince1970 + decoder.nonConformingFloatDecodingStrategy = .convertFromString( + positiveInfinity: "+InF", + negativeInfinity: "-InF", + nan: "NaN" + ) + return decoder + } + } + + do { + // Success from DatabaseValue + let value = Value.fromDatabaseValue(#"{ "duration": "+InF" }"#.databaseValue)! + XCTAssertEqual(value.duration, Double.infinity) + + let value2 = Value.fromDatabaseValue(#"{ "duration": "-InF" }"#.databaseValue)! + XCTAssertEqual(value2.duration, -Double.infinity) + + let value3 = Value.fromDatabaseValue(#"{ "duration": "NaN" }"#.databaseValue)! + XCTAssertTrue(value3.duration.isNaN) + } + do { + // Success from database + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let value = try Value.fetchOne(db, sql: #"SELECT '{ "duration": "+InF" }'"#)! + XCTAssertEqual(value.duration, Double.infinity) + + let value2 = try Value.fetchOne(db, sql: #"SELECT '{ "duration": "-InF" }'"#)! + XCTAssertEqual(value2.duration, -Double.infinity) + + let value3 = try Value.fetchOne(db, sql: #"SELECT '{ "duration": "NaN" }'"#)! 
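+ // Fetching goes through the same custom `databaseJSONDecoder()` as the
+ // DatabaseValue cases above, so the non-conforming float spellings
+ // ("+InF", "-InF", "NaN") round-trip from SQL results as well.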
+ XCTAssertTrue(value3.duration.isNaN) + } + } + do { + // Failure from DatabaseValue + let value = Value.fromDatabaseValue("infinity".databaseValue) + XCTAssertNil(value) + } + } } diff --git a/Tests/GRDBTests/DatabaseValueConvertibleEncodableTests.swift b/Tests/GRDBTests/DatabaseValueConvertibleEncodableTests.swift index 66b0b75506..00f44620fe 100644 --- a/Tests/GRDBTests/DatabaseValueConvertibleEncodableTests.swift +++ b/Tests/GRDBTests/DatabaseValueConvertibleEncodableTests.swift @@ -304,4 +304,39 @@ extension DatabaseValueConvertibleEncodableTests { let encodedUUID = UUID.fromDatabaseValue(dbValue)! XCTAssertEqual(encodedUUID, value.uuid) } + + func testDatabaseValueConvertibleImplementationDerivedFromEncodableWithCustomJsonEncoder() throws { + struct Value: Encodable, DatabaseValueConvertible { + let duration: Double + + static func fromDatabaseValue(_ databaseValue: DatabaseValue) -> Value? { + preconditionFailure("not tested") + } + + public static func databaseJSONEncoder() -> JSONEncoder { + let encoder = JSONEncoder() + encoder.dataEncodingStrategy = .base64 + encoder.dateEncodingStrategy = .millisecondsSince1970 + encoder.nonConformingFloatEncodingStrategy = .convertToString( + positiveInfinity: "+InF", + negativeInfinity: "-InF", + nan: "NaN" + ) + // guarantee some stability in order to ease value comparison + encoder.outputFormatting = .sortedKeys + return encoder + } + } + + do { + let dbValue = Value(duration: .infinity).databaseValue + XCTAssertEqual(dbValue.storage.value as! String, #"{"duration":"+InF"}"#) + + let dbValue2 = Value(duration: -Double.infinity).databaseValue + XCTAssertEqual(dbValue2.storage.value as! String, #"{"duration":"-InF"}"#) + + let dbValue3 = Value(duration: .nan).databaseValue + XCTAssertEqual(dbValue3.storage.value as! 
String, #"{"duration":"NaN"}"#) + } + } } diff --git a/Tests/GRDBTests/DatabaseWriterTests.swift b/Tests/GRDBTests/DatabaseWriterTests.swift index 76a7eb88de..9139b1f9eb 100644 --- a/Tests/GRDBTests/DatabaseWriterTests.swift +++ b/Tests/GRDBTests/DatabaseWriterTests.swift @@ -195,7 +195,7 @@ class DatabaseWriterTests : GRDBTestCase { } func testVacuumInto() throws { - guard #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) else { + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { throw XCTSkip("VACUUM INTO is not available") } // Prevent SQLCipher failures @@ -266,7 +266,7 @@ class DatabaseWriterTests : GRDBTestCase { try DatabaseQueue().backup(to: dbQueue) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_write() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -286,7 +286,7 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabasePool())) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_writeWithoutTransaction() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -309,7 +309,7 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabasePool())) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_barrierWriteWithoutTransaction() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -332,7 +332,7 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabasePool())) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_erase() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -350,7 +350,7 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabasePool())) } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_vacuum() async throws { func setup(_ dbWriter: T) throws -> T { try dbWriter.write { db in @@ -366,7 +366,7 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabasePool())) } - @available(macOS 10.16, iOS 14, tvOS 14, watchOS 7, *) // async + vacuum into + @available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) // async + vacuum into func testAsyncAwait_vacuumInto() async throws { // Prevent SQLCipher failures guard sqlite3_libversion_number() >= 3027000 else { @@ -395,4 +395,27 @@ class DatabaseWriterTests : GRDBTestCase { try await test(setup(makeDatabaseQueue())) try await test(setup(makeDatabasePool())) } + + /// A test related to + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) + func testAsyncWriteThenRead() async throws { + /// An async read performed after an async write should see the write. 
+ func test(_ dbWriter: some DatabaseWriter) async throws { + try await dbWriter.write { db in + try db.execute(sql: """ + CREATE TABLE t (id INTEGER PRIMARY KEY); + INSERT INTO t VALUES (1); + """) + } + + let count = try await dbWriter.read { db in + try Table("t").fetchCount(db) + } + + XCTAssertEqual(count, 1) + } + + try await test(makeDatabaseQueue()) + try await test(makeDatabasePool()) + } } diff --git a/Tests/GRDBTests/DerivableRequestTests.swift b/Tests/GRDBTests/DerivableRequestTests.swift index fb360a85d4..1cb5f91b25 100644 --- a/Tests/GRDBTests/DerivableRequestTests.swift +++ b/Tests/GRDBTests/DerivableRequestTests.swift @@ -50,7 +50,7 @@ private var libraryMigrator: DatabaseMigrator = { } try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") - t.column("authorId", .integer).notNull().references("author") + t.belongsTo("author").notNull() t.column("title", .text).notNull() } try db.create(virtualTable: "bookFts4", using: FTS4()) { t in @@ -124,6 +124,30 @@ extension DerivableRequest { } class DerivableRequestTests: GRDBTestCase { + func testAll() throws { + let dbQueue = try makeDatabaseQueue() + try libraryMigrator.migrate(dbQueue) + try dbQueue.inDatabase { db in + let baseRequest = Author.all().filter(country: "FR") + let request = baseRequest.all() + let (sql, arguments) = try request.build(db) + XCTAssertEqual(sql, #"SELECT * FROM "author" WHERE "country" = ?"#) + XCTAssertEqual(arguments, ["FR"]) + } + } + + func testNone() throws { + let dbQueue = try makeDatabaseQueue() + try libraryMigrator.migrate(dbQueue) + try dbQueue.inDatabase { db in + let baseRequest = Author.all().filter(country: "FR") + let request = baseRequest.none() + let (sql, arguments) = try request.build(db) + XCTAssertEqual(sql, #"SELECT * FROM "author" WHERE ("country" = ?) AND ?"#) + XCTAssertEqual(arguments, ["FR", false]) + } + } + func testFilteredRequest() throws { let dbQueue = try makeDatabaseQueue() try libraryMigrator.migrate(dbQueue) @@ -149,7 +173,13 @@ class DerivableRequestTests: GRDBTestCase { let dbQueue = try makeDatabaseQueue() try libraryMigrator.migrate(dbQueue) try dbQueue.inDatabase { db in - // ... for two requests (1) + try db.create(view: "authorView", as: Author.select( + AllColumns(), + [Column("firstName"), Column("lastName")] + .joined(operator: .concat) + .forKey("fullName"))) + + // ... for one table sqlQueries.removeAll() let authorNames = try Author.all() .orderByFullName() @@ -184,7 +214,70 @@ class DerivableRequestTests: GRDBTestCase { SELECT * FROM "author" """) - // ... for two requests (2) + sqlQueries.removeAll() + _ /* stableOrderAuthors */ = try Author.all() + .withStableOrder() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "author" ORDER BY "id" + """) + + sqlQueries.removeAll() + _ /* stableOrderAuthors */ = try Author.all() + .orderByFullName() + .withStableOrder() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "author" ORDER BY "lastName" COLLATE swiftLocalizedCaseInsensitiveCompare, "firstName" COLLATE swiftLocalizedCaseInsensitiveCompare, "id" + """) + + // ... 
for one view + sqlQueries.removeAll() + _ /* authorViewNames */ = try Table("authorView").all() + .order(Column("fullName")) + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "authorView" \ + ORDER BY "fullName" + """) + + sqlQueries.removeAll() + _ /* reversedAuthorViewNames */ = try Table("authorView").all() + .order(Column("fullName")) + .reversed() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "authorView" \ + ORDER BY "fullName" DESC + """) + + sqlQueries.removeAll() + _ /* unorderedAuthorViews */ = try Table("authorView").all() + .order(Column("fullName")) + .unordered() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "authorView" + """) + + sqlQueries.removeAll() + _ /* stableOrderAuthorViews */ = try Table("authorView").all() + .withStableOrder() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "authorView" ORDER BY 1, 2, 3, 4, 5 + """) + + sqlQueries.removeAll() + _ /* stableOrderAuthorViews */ = try Table("authorView").all() + .order(Column("fullName")) + .withStableOrder() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT * FROM "authorView" ORDER BY "fullName", 1, 2, 3, 4, 5 + """) + + // ... for two tables (2) sqlQueries.removeAll() let bookTitles = try Book .joining(required: Book.author.orderByFullName()) @@ -228,6 +321,23 @@ class DerivableRequestTests: GRDBTestCase { SELECT "book".* FROM "book" \ JOIN "author" ON "author"."id" = "book"."authorId" """) + + sqlQueries.removeAll() + _ /* stableOrderBooks */ = try Book + .joining(required: Book.author.orderByFullName()) + .orderByTitle() + .withStableOrder() + .fetchAll(db) + XCTAssertEqual(lastSQLQuery, """ + SELECT "book".* FROM "book" \ + JOIN "author" ON "author"."id" = "book"."authorId" \ + ORDER BY \ + "book"."title" COLLATE swiftLocalizedCaseInsensitiveCompare, \ + "book"."id", \ + "author"."lastName" COLLATE swiftLocalizedCaseInsensitiveCompare, \ + "author"."firstName" COLLATE swiftLocalizedCaseInsensitiveCompare, \ + "author"."id" + """) } } diff --git a/Tests/GRDBTests/EncryptionTests.swift b/Tests/GRDBTests/EncryptionTests.swift index d1ade3f7f5..a3a1c74536 100644 --- a/Tests/GRDBTests/EncryptionTests.swift +++ b/Tests/GRDBTests/EncryptionTests.swift @@ -637,6 +637,26 @@ class EncryptionTests: GRDBTestCase { } } + // Test for the use case described in + func testEncryptedDatabaseCanBeAttached() throws { + // Create encrypted db + var config = Configuration() + config.prepareDatabase { db in + try db.usePassphrase("secret") + } + let encryptedDBQueue = try makeDatabaseQueue(filename: "encrypted.sqlite", configuration: config) + try encryptedDBQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + } + + let attachingDBQueue = try makeDatabaseQueue() + try attachingDBQueue.inDatabase { db in + try db.execute(sql: "ATTACH DATABASE ? 
AS encrypted KEY ?", arguments: [encryptedDBQueue.path, "secret"]) + let count = try Table("t").fetchCount(db) + XCTAssertEqual(count, 0) + } + } + func testExportPlainTextDatabaseToEncryptedDatabase() throws { // See https://discuss.zetetic.net/t/how-to-encrypt-a-plaintext-sqlite-database-to-use-sqlcipher-and-avoid-file-is-encrypted-or-is-not-a-database-errors/868?source_topic_id=939 do { diff --git a/Tests/GRDBTests/FTS3RecordTests.swift b/Tests/GRDBTests/FTS3RecordTests.swift index 036fb5da4d..b37c08bb54 100644 --- a/Tests/GRDBTests/FTS3RecordTests.swift +++ b/Tests/GRDBTests/FTS3RecordTests.swift @@ -19,7 +19,7 @@ extension Book : FetchableRecord { extension Book : MutablePersistableRecord { static let databaseTableName = "books" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] func encode(to container: inout PersistenceContainer) { container[.rowID] = id diff --git a/Tests/GRDBTests/FTS3TableBuilderTests.swift b/Tests/GRDBTests/FTS3TableBuilderTests.swift index cd51aab516..b2434da37a 100644 --- a/Tests/GRDBTests/FTS3TableBuilderTests.swift +++ b/Tests/GRDBTests/FTS3TableBuilderTests.swift @@ -76,7 +76,7 @@ class FTS3TableBuilderTests: GRDBTestCase { } #elseif !GRDBCIPHER func testUnicode61TokenizerDiacriticsRemove() throws { - guard #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) else { + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { throw XCTSkip() } let dbQueue = try makeDatabaseQueue() diff --git a/Tests/GRDBTests/FTS4RecordTests.swift b/Tests/GRDBTests/FTS4RecordTests.swift index f6c2eb6f0f..646c3852ef 100644 --- a/Tests/GRDBTests/FTS4RecordTests.swift +++ b/Tests/GRDBTests/FTS4RecordTests.swift @@ -19,7 +19,7 @@ extension Book : FetchableRecord { extension Book : MutablePersistableRecord { static let databaseTableName = "books" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] func encode(to container: inout PersistenceContainer) { container[.rowID] = id diff --git a/Tests/GRDBTests/FTS5CustomTokenizerTests.swift b/Tests/GRDBTests/FTS5CustomTokenizerTests.swift index cc89a34877..f5d76765d6 100644 --- a/Tests/GRDBTests/FTS5CustomTokenizerTests.swift +++ b/Tests/GRDBTests/FTS5CustomTokenizerTests.swift @@ -6,7 +6,7 @@ import GRDB // A custom tokenizer that ignores some tokens private final class StopWordsTokenizer : FTS5CustomTokenizer { static let name = "stopWords" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer let ignoredTokens: [String] init(db: Database, arguments: [String]) throws { @@ -23,7 +23,7 @@ private final class StopWordsTokenizer : FTS5CustomTokenizer { // TODO: test that deinit is called } - func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { + func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { // The way we implement stop words is by letting wrappedTokenizer do its // job but intercepting its tokens before they feed SQLite. 
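+ // Two mechanical changes recur throughout these tokenizer tests: wrapped
+ // tokenizers are stored as the explicit existential `any FTS5Tokenizer`
+ // (Swift 5.6 spelling), and token buffers are rebound to `CChar` rather
+ // than `Int8`. The two types coincide on Apple platforms, but `CChar` is
+ // the portable spelling for the C `char` that the FTS5 token callback
+ // expects.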
@@ -62,7 +62,7 @@ private final class StopWordsTokenizer : FTS5CustomTokenizer { // A custom tokenizer that converts tokens to NFKC so that "fi" can match "fi" (U+FB01: LATIN SMALL LIGATURE FI) private final class NFKCTokenizer : FTS5CustomTokenizer { static let name = "nfkc" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer init(db: Database, arguments: [String]) throws { if arguments.isEmpty { @@ -76,7 +76,7 @@ private final class NFKCTokenizer : FTS5CustomTokenizer { // TODO: test that deinit is called } - func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { + func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { // The way we implement NFKC conversion is by letting wrappedTokenizer // do its job, but intercepting its tokens before they feed SQLite. @@ -107,7 +107,7 @@ private final class NFKCTokenizer : FTS5CustomTokenizer { guard let addr = buffer.baseAddress else { return 0 // SQLITE_OK } - let pToken = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: Int8.self) + let pToken = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: CChar.self) let nToken = Int32(buffer.count) return customContext.tokenCallback(customContext.context, flags, pToken, nToken, iStart, iEnd) } @@ -119,7 +119,7 @@ private final class NFKCTokenizer : FTS5CustomTokenizer { // A custom tokenizer that defines synonyms private final class SynonymsTokenizer : FTS5CustomTokenizer { static let name = "synonyms" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer let synonyms: [Set] init(db: Database, arguments: [String]) throws { @@ -135,7 +135,7 @@ private final class SynonymsTokenizer : FTS5CustomTokenizer { // TODO: test that deinit is called } - func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { + func tokenize(context: UnsafeMutableRawPointer?, tokenization: FTS5Tokenization, pText: UnsafePointer?, nText: Int32, tokenCallback: @escaping FTS5TokenCallback) -> Int32 { // Don't look for synonyms when tokenizing queries, as advised by // https://www.sqlite.org/fts5.html#synonym_support if tokenization.contains(.query) { @@ -176,7 +176,7 @@ private final class SynonymsTokenizer : FTS5CustomTokenizer { guard let addr = buffer.baseAddress else { return 0 // SQLITE_OK } - let pToken = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: Int8.self) + let pToken = UnsafeMutableRawPointer(mutating: addr).assumingMemoryBound(to: CChar.self) let nToken = Int32(buffer.count) // Set FTS5_TOKEN_COLOCATED for all but first token let synonymFlags = (index == 0) ? 
flags : flags | 1 // 1: FTS5_TOKEN_COLOCATED diff --git a/Tests/GRDBTests/FTS5RecordTests.swift b/Tests/GRDBTests/FTS5RecordTests.swift index eac65c0a2c..b25b0a0135 100644 --- a/Tests/GRDBTests/FTS5RecordTests.swift +++ b/Tests/GRDBTests/FTS5RecordTests.swift @@ -20,7 +20,7 @@ extension Book : FetchableRecord { extension Book : MutablePersistableRecord { static let databaseTableName = "books" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] func encode(to container: inout PersistenceContainer) { container[.rowID] = id diff --git a/Tests/GRDBTests/FTS5TableBuilderTests.swift b/Tests/GRDBTests/FTS5TableBuilderTests.swift index 9bccf6c116..b44dc6589f 100644 --- a/Tests/GRDBTests/FTS5TableBuilderTests.swift +++ b/Tests/GRDBTests/FTS5TableBuilderTests.swift @@ -130,7 +130,7 @@ class FTS5TableBuilderTests: GRDBTestCase { } #elseif !GRDBCIPHER func testUnicode61TokenizerDiacriticsRemove() throws { - guard #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) else { + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { throw XCTSkip() } @@ -334,5 +334,43 @@ class FTS5TableBuilderTests: GRDBTestCase { } } } + + // Regression test for + func testIssue1390() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + guard sqlite3_libversion_number() >= 3035000 else { + throw XCTSkip("UPSERT is not available") + } +#else + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { + throw XCTSkip("UPSERT is not available") + } +#endif + + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + struct Content: Codable, Hashable, PersistableRecord { + var id: String + var text: String + } + + try db.create(table: "content") { t in + t.primaryKey("id", .text) + t.column("text", .text) + } + + try db.create(virtualTable: "fts", using: FTS5()) { t in + t.tokenizer = .porter() + t.synchronize(withTable: "content") + t.column("id").notIndexed() + t.column("text") + } + + try db.inTransaction { + try Content(id: "foobar", text: "baz").upsert(db) + return .commit + } + } + } } #endif diff --git a/Tests/GRDBTests/FTS5WrapperTokenizerTests.swift b/Tests/GRDBTests/FTS5WrapperTokenizerTests.swift index b7861e5257..78330fcfc1 100644 --- a/Tests/GRDBTests/FTS5WrapperTokenizerTests.swift +++ b/Tests/GRDBTests/FTS5WrapperTokenizerTests.swift @@ -6,7 +6,7 @@ import GRDB // A custom wrapper tokenizer that ignores some tokens private final class StopWordsTokenizer : FTS5WrapperTokenizer { static let name = "stopWords" - var wrappedTokenizer: FTS5Tokenizer + var wrappedTokenizer: any FTS5Tokenizer init(db: Database, arguments: [String]) throws { if arguments.isEmpty { @@ -29,7 +29,7 @@ private final class StopWordsTokenizer : FTS5WrapperTokenizer { // A custom wrapper tokenizer that converts tokens to LatinAscii so that "fi" can match "fi" (U+FB01: LATIN SMALL LIGATURE FI), "ß", "ss", and "æ", "ae". 
private final class LatinAsciiTokenizer : FTS5WrapperTokenizer { static let name = "latinascii" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer init(db: Database, arguments: [String]) throws { wrappedTokenizer = try db.makeTokenizer(.unicode61()) @@ -46,7 +46,7 @@ private final class LatinAsciiTokenizer : FTS5WrapperTokenizer { // A custom wrapper tokenizer that defines synonyms private final class SynonymsTokenizer : FTS5WrapperTokenizer { static let name = "synonyms" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer let synonyms: [Set] = [["first", "1st"]] init(db: Database, arguments: [String]) throws { @@ -87,7 +87,7 @@ private final class SynonymsTokenizer : FTS5WrapperTokenizer { class CustomizedUnicode61WrappingTokenizer: FTS5WrapperTokenizer { static let name = "custom_unicode61" - let wrappedTokenizer: FTS5Tokenizer + let wrappedTokenizer: any FTS5Tokenizer required init(db: Database, arguments: [String]) throws { wrappedTokenizer = try db.makeTokenizer(.unicode61(diacritics: .removeLegacy, separators: ["X"])) diff --git a/Tests/GRDBTests/FetchableRecordDecodableTests.swift b/Tests/GRDBTests/FetchableRecordDecodableTests.swift index 5d336fdac3..1f7789a5e2 100644 --- a/Tests/GRDBTests/FetchableRecordDecodableTests.swift +++ b/Tests/GRDBTests/FetchableRecordDecodableTests.swift @@ -263,14 +263,130 @@ extension FetchableRecordDecodableTests { extension FetchableRecordDecodableTests { + func testStructWithData() throws { + struct StructWithData : FetchableRecord, Decodable { + let data: Data + } + + let dbQueue = try makeDatabaseQueue() + + do { + let data = "foo".data(using: .utf8) + + do { + let value = try StructWithData(row: ["data": data]) + XCTAssertEqual(value.data, data) + } + + do { + let value = try dbQueue.read { + try StructWithData.fetchOne($0, sql: "SELECT ? AS data", arguments: [data])! + } + XCTAssertEqual(value.data, data) + } + } + do { + do { + _ = try StructWithData(row: ["data": nil]) + XCTFail("Expected Error") + } catch let error as RowDecodingError { + switch error { + case .valueMismatch: + XCTAssertEqual(error.description, """ + could not decode Data from database value NULL - \ + column: "data", \ + column index: 0, \ + row: [data:NULL] + """) + default: + XCTFail("Unexpected Error") + } + } + + do { + try dbQueue.read { + _ = try StructWithData.fetchOne($0, sql: "SELECT NULL AS data") + } + XCTFail("Expected Error") + } catch let error as RowDecodingError { + switch error { + case .valueMismatch: + XCTAssertEqual(error.description, """ + could not decode Data from database value NULL - \ + column: "data", \ + column index: 0, \ + row: [data:NULL], \ + sql: `SELECT NULL AS data`, \ + arguments: [] + """) + default: + XCTFail("Unexpected Error") + } + } + } + } + func testStructWithDate() throws { struct StructWithDate : FetchableRecord, Decodable { let date: Date } - let date = Date() - let value = try StructWithDate(row: ["date": date]) - XCTAssert(abs(value.date.timeIntervalSince(date)) < 0.001) + let dbQueue = try makeDatabaseQueue() + + do { + let date = Date() + + do { + let value = try StructWithDate(row: ["date": date]) + XCTAssert(abs(value.date.timeIntervalSince(date)) < 0.001) + } + + do { + let value = try dbQueue.read { + try StructWithDate.fetchOne($0, sql: "SELECT ? AS date", arguments: [date])! 
+ } + XCTAssert(abs(value.date.timeIntervalSince(date)) < 0.001) + } + } + do { + do { + _ = try StructWithDate(row: ["date": nil]) + XCTFail("Expected Error") + } catch let error as RowDecodingError { + switch error { + case .valueMismatch: + XCTAssertEqual(error.description, """ + could not decode Date from database value NULL - \ + column: "date", \ + column index: 0, \ + row: [date:NULL] + """) + default: + XCTFail("Unexpected Error") + } + } + + do { + try dbQueue.read { + _ = try StructWithDate.fetchOne($0, sql: "SELECT NULL AS date") + } + XCTFail("Expected Error") + } catch let error as RowDecodingError { + switch error { + case .valueMismatch: + XCTAssertEqual(error.description, """ + could not decode Date from database value NULL - \ + column: "date", \ + column index: 0, \ + row: [date:NULL], \ + sql: `SELECT NULL AS date`, \ + arguments: [] + """) + default: + XCTFail("Unexpected Error") + } + } + } } func testStructWithURL() throws { @@ -778,7 +894,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "requiredId": 1, "optionalName": "test1", "requiredDates": "[128000]", @@ -794,7 +910,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "REQUIREDID": 1, "OPTIONALNAME": "test1", "REQUIREDDATES": "[128000]", @@ -810,7 +926,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "requiredId": 1, "optionalName": nil, "requiredDates": "[128000]", @@ -826,7 +942,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "requiredId": 1, "requiredDates": "[128000]", "optionalDates": "[null, 128000]", @@ -840,7 +956,7 @@ extension FetchableRecordDecodableTests { } do { - _ = try RowDecoder().decode(Record.self, from: [ + _ = try FetchableRecordDecoder().decode(Record.self, from: [ "required_id": 1, "optionalName": "test1", "requiredDates": "[128000]", @@ -874,7 +990,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "required_id": 1, "optional_name": "test1", "required_dates": "[128000]", @@ -890,7 +1006,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "REQUIRED_ID": 1, "OPTIONAL_NAME": "test1", "REQUIRED_DATES": "[128000]", @@ -906,7 +1022,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "required_id": 1, "optional_name": nil, "required_dates": "[128000]", @@ -922,7 +1038,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "required_id": 1, "required_dates": "[128000]", "optional_dates": "[null, 128000]", @@ -937,7 +1053,7 @@ extension FetchableRecordDecodableTests { do { // Matches JSONDecoder.KeyDecodingStrategy.convertFromSnakeCase 
behavior - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "requiredId": 1, "optionalName": "test1", "requiredDates": "[128000]", @@ -953,7 +1069,7 @@ extension FetchableRecordDecodableTests { } do { - _ = try RowDecoder().decode(Record.self, from: [ + _ = try FetchableRecordDecoder().decode(Record.self, from: [ "required_idx": 1, "optional_name": "test1", "required_dates": "[128000]", @@ -970,7 +1086,7 @@ extension FetchableRecordDecodableTests { } do { - _ = try RowDecoder().decode(IncorrectRecord.self, from: [ + _ = try FetchableRecordDecoder().decode(IncorrectRecord.self, from: [ "required_id": 1, "optional_name": "test1", "required_dates": "[128000]", @@ -1008,7 +1124,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "_requiredId": 1, "_optionalName": "test1", "_requiredDates": "[128000]", @@ -1024,7 +1140,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "_requiredId": 1, "_optionalName": nil, "_requiredDates": "[128000]", @@ -1040,7 +1156,7 @@ extension FetchableRecordDecodableTests { } do { - let record = try RowDecoder().decode(Record.self, from: [ + let record = try FetchableRecordDecoder().decode(Record.self, from: [ "_requiredId": 1, "_requiredDates": "[128000]", "_optionalDates": "[null, 128000]", @@ -1054,7 +1170,7 @@ extension FetchableRecordDecodableTests { } do { - _ = try RowDecoder().decode(Record.self, from: [ + _ = try FetchableRecordDecoder().decode(Record.self, from: [ "requiredId": 1, "_optionalName": "test1", "_requiredDates": "[128000]", @@ -1377,7 +1493,7 @@ extension FetchableRecordDecodableTests { // all optionals decode missing keys are nil. This is because GRDB // records accept rows with missing columns, and b and c may want to // decode columns. - _ = try RowDecoder().decode(Composed.self, from: [:]) + _ = try FetchableRecordDecoder().decode(Composed.self, from: [:]) } // This is a regression test for https://github.com/groue/GRDB.swift/issues/664 @@ -1389,7 +1505,7 @@ extension FetchableRecordDecodableTests { var b: B } do { - _ = try RowDecoder().decode(Composed.self, from: [:]) + _ = try FetchableRecordDecoder().decode(Composed.self, from: [:]) XCTFail("Expected error") } catch DecodingError.keyNotFound { // a or b can not be decoded because only one key is allowed to be missing @@ -1409,7 +1525,7 @@ extension FetchableRecordDecodableTests { // - a is present // - root is b and c is missing, or the opposite (two possible user intents) let row = try Row.fetchOne(db, sql: "SELECT NULL", adapter: ScopeAdapter(["a": EmptyRowAdapter()]))! - _ = try RowDecoder().decode(Composed.self, from: row) + _ = try FetchableRecordDecoder().decode(Composed.self, from: row) XCTFail("Expected error") } catch let DecodingError.keyNotFound(key, context) { XCTAssert(["b", "c"].contains(key.stringValue)) @@ -1419,7 +1535,7 @@ extension FetchableRecordDecodableTests { // - b is present // - root is a and c is missing, or the opposite (two possible user intents) let row = try Row.fetchOne(db, sql: "SELECT NULL", adapter: ScopeAdapter(["b": EmptyRowAdapter()]))! 
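+ // Every `RowDecoder` → `FetchableRecordDecoder` change in this file is
+ // the same rename: `decode(_:from:)` keeps taking a record type and a
+ // row, only the decoder's name differs.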
- _ = try RowDecoder().decode(Composed.self, from: row) + _ = try FetchableRecordDecoder().decode(Composed.self, from: row) XCTFail("Expected error") } catch let DecodingError.keyNotFound(key, context) { XCTAssert(["a", "c"].contains(key.stringValue)) @@ -1429,7 +1545,7 @@ extension FetchableRecordDecodableTests { // - c is present // - root is a and b is missing, or the opposite (two possible user intents) let row = try Row.fetchOne(db, sql: "SELECT NULL", adapter: ScopeAdapter(["c": EmptyRowAdapter()]))! - _ = try RowDecoder().decode(Composed.self, from: row) + _ = try FetchableRecordDecoder().decode(Composed.self, from: row) XCTFail("Expected error") } catch let DecodingError.keyNotFound(key, context) { XCTAssert(["a", "b"].contains(key.stringValue)) @@ -1494,3 +1610,307 @@ extension FetchableRecordDecodableTests { fileprivate extension CodingUserInfoKey { static let testKey = CodingUserInfoKey(rawValue: "correct")! } + +// MARK: - Decodable + custom init + +extension FetchableRecordDecodableTests { + // Test for + func testStructWithCustomizedRowInitializer() throws { + struct Player: Decodable, FetchableRecord { + var id: Int + var name: String + var isFetched: Bool = false + + enum CodingKeys: String, CodingKey { + case id, name + } + + init(id: Int, name: String) { + self.id = id + self.name = name + self.isFetched = false + } + + init(row: Row) throws { + self = try FetchableRecordDecoder().decode(Player.self, from: row) + self.isFetched = true + } + } + + do { + let player = Player(id: 1, name: "Arthur") + XCTAssertEqual(player.id, 1) + XCTAssertEqual(player.name, "Arthur") + XCTAssertEqual(player.isFetched, false) + } + + do { + let player = try Player(row: ["id": 2, "name": "Barbara"]) + XCTAssertEqual(player.id, 2) + XCTAssertEqual(player.name, "Barbara") + XCTAssertEqual(player.isFetched, true) + } + } +} + +// MARK: - KeyedContainer tests + +extension FetchableRecordDecodableTests { + struct AnyCodingKey: CodingKey { + var stringValue: String + var intValue: Int? 
{ nil } + + init(_ key: String) { + self.stringValue = key + } + + init(stringValue: String) { + self.stringValue = stringValue + } + + init?(intValue: Int) { + return nil + } + } + + func test_allKeys_and_containsKey() throws { + struct Witness: Decodable, FetchableRecord { + init(from decoder: any Decoder) throws { + // Top + let container = try decoder.container(keyedBy: AnyCodingKey.self) + do { + // Test allKeys + let allKeys = container.allKeys + XCTAssertEqual(Set(allKeys.map(\.stringValue)), [ + "a", + "topLevelScope1", + "topLevelScope2", + "nestedScope1", + "nestedScope2", + "prefetchedRows1", + "prefetchedRows2"]) + + // Test contains(_:) + for key in allKeys { + XCTAssertTrue(container.contains(key)) + } + XCTAssertFalse(container.contains(AnyCodingKey("b"))) + XCTAssertFalse(container.contains(AnyCodingKey("c"))) + } + + // topLevelScope1 + let topLevelScope1Container = try container.nestedContainer( + keyedBy: AnyCodingKey.self, + forKey: AnyCodingKey("topLevelScope1")) + do { + // Test allKeys + let allKeys = topLevelScope1Container.allKeys + XCTAssertEqual(Set(allKeys.map(\.stringValue)), [ + "c", + ]) + + // Test contains(_:) + for key in allKeys { + XCTAssertTrue(topLevelScope1Container.contains(key)) + } + } + + // topLevelScope2 + let topLevelScope2Container = try container.nestedContainer( + keyedBy: AnyCodingKey.self, + forKey: AnyCodingKey("topLevelScope2")) + do { + // Test allKeys + let allKeys = topLevelScope2Container.allKeys + XCTAssertEqual(Set(allKeys.map(\.stringValue)), [ + "nestedScope2", + "nestedScope1", + "prefetchedRows2", + ]) + + // Test contains(_:) + for key in allKeys { + XCTAssertTrue(topLevelScope2Container.contains(key)) + } + } + } + } + + try makeDatabaseQueue().read { db in + let row = try Row.fetchOne( + db, sql: """ + SELECT 1 AS a, -- main row + 2 AS b, -- not exposed + 3 AS c, -- scope topLevelScope1 + 4 AS d, -- scope topLevelScope2.nestedScope1 + 5 AS e -- scope topLevelScope2.nestedScope2 + """, + adapter: RangeRowAdapter(0..<1) + .addingScopes([ + "topLevelScope1": RangeRowAdapter(2..<3), + "topLevelScope2": EmptyRowAdapter().addingScopes([ + "nestedScope1": RangeRowAdapter(3..<4), + "nestedScope2": RangeRowAdapter(4..<5), + ]), + ]))! 
+ + row.prefetchedRows.setRows([], forKeyPath: ["prefetchedRows1"]) + row.prefetchedRows.setRows([Row()], forKeyPath: ["topLevelScope2", "prefetchedRows2"]) + // Check test setup + XCTAssertEqual(row.debugDescription, """ + ▿ [a:1] + unadapted: [a:1 b:2 c:3 d:4 e:5] + - topLevelScope1: [c:3] + - topLevelScope2: [] + - nestedScope1: [d:4] + - nestedScope2: [e:5] + + prefetchedRows2: 1 row + + prefetchedRows1: 0 row + + prefetchedRows2: 1 row + """) + + // Test keyed container + _ = try FetchableRecordDecoder().decode(Witness.self, from: row) + } + } + + // Regression test for + func test_decodeNil_and_containsKey() throws { + struct Witness: Decodable, FetchableRecord { + struct NestedRecord: Decodable, FetchableRecord { } + + init(from decoder: any Decoder) throws { + let container = try decoder.container(keyedBy: AnyCodingKey.self) + + // column + do { + let key = AnyCodingKey("a") + let nilDecoded = try container.decodeNil(forKey: key) + let value = try container.decodeIfPresent(Int.self, forKey: key) + XCTAssertTrue(nilDecoded == (value == nil)) + XCTAssertTrue(container.contains(key)) + } + + // scope + do { + let key = AnyCodingKey("nested") + let nilDecoded = try container.decodeNil(forKey: key) + let value = try container.decodeIfPresent(NestedRecord.self, forKey: key) + XCTAssertTrue(nilDecoded == (value == nil)) + XCTAssertTrue(container.contains(key)) + } + + // missing key + do { + let key = AnyCodingKey("missing") + try XCTAssertTrue(container.decodeNil(forKey: key)) + try XCTAssertNil(container.decodeIfPresent(Int.self, forKey: key)) + try XCTAssertNil(container.decodeIfPresent(NestedRecord.self, forKey: key)) + XCTAssertFalse(container.contains(key)) + } + } + } + + try makeDatabaseQueue().read { db in + do { + let row = try Row.fetchOne( + db, sql: """ + SELECT 1 AS a, 2 AS b + """, + adapter: ScopeAdapter([ + "nested": RangeRowAdapter(1..<2), + ]))! + + // Check test setup + XCTAssertEqual(row.debugDescription, """ + ▿ [a:1 b:2] + unadapted: [a:1 b:2] + - nested: [b:2] + """) + + // Test keyed container + _ = try FetchableRecordDecoder().decode(Witness.self, from: row) + } + + do { + let row = try Row.fetchOne( + db, sql: """ + SELECT NULL AS a, NULL AS b + """, + adapter: ScopeAdapter([ + "nested": RangeRowAdapter(1..<2), + ]))! + + // Check test setup + XCTAssertEqual(row.debugDescription, """ + ▿ [a:NULL b:NULL] + unadapted: [a:NULL b:NULL] + - nested: [b:NULL] + """) + + // Test keyed container + _ = try FetchableRecordDecoder().decode(Witness.self, from: row) + } + } + } + + // Regression test for + func test_decodeNil_when_scope_and_column_have_the_same_name() throws { + struct Witness: Decodable, FetchableRecord { + struct NestedRecord: Decodable, FetchableRecord { } + + init(from decoder: any Decoder) throws { + let container = try decoder.container(keyedBy: AnyCodingKey.self) + + let key = AnyCodingKey("a") + let nilDecoded = try container.decodeNil(forKey: key) + let intValue = try container.decodeIfPresent(Int.self, forKey: key) + let recordValue = try container.decodeIfPresent(NestedRecord.self, forKey: key) + XCTAssertTrue(nilDecoded == (intValue == nil)) + XCTAssertTrue(nilDecoded == (recordValue == nil)) + } + } + + try makeDatabaseQueue().read { db in + do { + let row = try Row.fetchOne( + db, sql: """ + SELECT 1 AS a + """, + adapter: ScopeAdapter([ + "a": SuffixRowAdapter(fromIndex: 0), + ]))! 
+ + // Check test setup + XCTAssertEqual(row.debugDescription, """ + ▿ [a:1] + unadapted: [a:1] + - a: [a:1] + """) + + // Test keyed container + _ = try FetchableRecordDecoder().decode(Witness.self, from: row) + } + + do { + let row = try Row.fetchOne( + db, sql: """ + SELECT NULL AS a + """, + adapter: ScopeAdapter([ + "a": SuffixRowAdapter(fromIndex: 0), + ]))! + + // Check test setup + XCTAssertEqual(row.debugDescription, """ + ▿ [a:NULL] + unadapted: [a:NULL] + - a: [a:NULL] + """) + + // Test keyed container + _ = try FetchableRecordDecoder().decode(Witness.self, from: row) + } + } + } +} diff --git a/Tests/GRDBTests/ForeignKeyDefinitionTests.swift b/Tests/GRDBTests/ForeignKeyDefinitionTests.swift new file mode 100644 index 0000000000..d6bacfc5a6 --- /dev/null +++ b/Tests/GRDBTests/ForeignKeyDefinitionTests.swift @@ -0,0 +1,1550 @@ +import XCTest +@testable import GRDB + +class ForeignKeyDefinitionTests: GRDBTestCase { + func testTable_belongsTo_hiddenRowID_plain() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name + t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + // Custom names + t.belongsTo("customParent", inTable: "parent") + t.belongsTo("customCountry", inTable: "country") + t.belongsTo("customTeam", inTable: "teams") + t.belongsTo("customPerson", inTable: "people") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(9), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentId" INTEGER REFERENCES "parent"("rowid"), \ + "COUNTRYId" INTEGER REFERENCES "COUNTRY"("rowid"), \ + "teamId" INTEGER REFERENCES "teams"("rowid"), \ + "peopleId" INTEGER REFERENCES "people"("rowid"), \ + "customParentId" INTEGER REFERENCES "parent"("rowid"), \ + "customCountryId" INTEGER REFERENCES "country"("rowid"), \ + "customTeamId" INTEGER REFERENCES "teams"("rowid"), \ + "customPersonId" INTEGER REFERENCES "people"("rowid"), \ + "b"\ + ) + """, + """ + CREATE INDEX "child_on_parentId" ON "child"("parentId") + """, + """ + CREATE INDEX "child_on_COUNTRYId" ON "child"("COUNTRYId") + """, + """ + CREATE INDEX "child_on_teamId" ON "child"("teamId") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + """ + CREATE INDEX "child_on_customParentId" ON "child"("customParentId") + """, + """ + CREATE INDEX "child_on_customCountryId" ON "child"("customCountryId") + """, + """ + CREATE INDEX "child_on_customTeamId" ON "child"("customTeamId") + """, + """ + CREATE INDEX "child_on_customPersonId" ON "child"("customPersonId") + """, + ]) + } + } + + func testTable_belongsTo_hiddenRowID_ifNotExists() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child", options: .ifNotExists) { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name 
+ t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + t.column("b") + } + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE IF NOT EXISTS "child" (\ + "a", \ + "parentId" INTEGER REFERENCES "parent"("rowid"), \ + "COUNTRYId" INTEGER REFERENCES "COUNTRY"("rowid"), \ + "teamId" INTEGER REFERENCES "teams"("rowid"), \ + "peopleId" INTEGER REFERENCES "people"("rowid"), \ + "b") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_parentId" ON "child"("parentId") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_COUNTRYId" ON "child"("COUNTRYId") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_teamId" ON "child"("teamId") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_hiddenRowID_unique() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").unique() + // Modified case of table name + t.belongsTo("COUNTRY").unique() + // Singularized table name + t.belongsTo("team").unique() + // Raw plural table name + t.belongsTo("people").unique() + t.column("b") + } + XCTAssertEqual(lastSQLQuery, """ + CREATE TABLE "child" (\ + "a", \ + "parentId" INTEGER UNIQUE REFERENCES "parent"("rowid"), \ + "COUNTRYId" INTEGER UNIQUE REFERENCES "COUNTRY"("rowid"), \ + "teamId" INTEGER UNIQUE REFERENCES "teams"("rowid"), \ + "peopleId" INTEGER UNIQUE REFERENCES "people"("rowid"), \ + "b") + """) + } + } + + func testTable_belongsTo_hiddenRowID_notIndexed() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", indexed: false) + // Modified case of table name + t.belongsTo("COUNTRY", indexed: false) + // Singularized table name + t.belongsTo("team", indexed: false) + // Raw plural table name + t.belongsTo("people", indexed: false) + t.column("b") + } + XCTAssertEqual(lastSQLQuery, """ + CREATE TABLE "child" (\ + "a", \ + "parentId" INTEGER REFERENCES "parent"("rowid"), \ + "COUNTRYId" INTEGER REFERENCES "COUNTRY"("rowid"), \ + "teamId" INTEGER REFERENCES "teams"("rowid"), \ + "peopleId" INTEGER REFERENCES "people"("rowid"), \ + "b") + """) + } + } + + func testTable_belongsTo_hiddenRowID_notNull() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").notNull() + // Modified case of table name + t.belongsTo("COUNTRY").notNull() + // Singularized table name + t.belongsTo("team").notNull() + // Raw 
plural table name + t.belongsTo("people").notNull() + t.column("b") + } + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentId" INTEGER NOT NULL REFERENCES "parent"("rowid"), \ + "COUNTRYId" INTEGER NOT NULL REFERENCES "COUNTRY"("rowid"), \ + "teamId" INTEGER NOT NULL REFERENCES "teams"("rowid"), \ + "peopleId" INTEGER NOT NULL REFERENCES "people"("rowid"), \ + "b") + """, + """ + CREATE INDEX "child_on_parentId" ON "child"("parentId") + """, + """ + CREATE INDEX "child_on_COUNTRYId" ON "child"("COUNTRYId") + """, + """ + CREATE INDEX "child_on_teamId" ON "child"("teamId") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_hiddenRowID_foreignKeyOptions() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "parent") { t in + t.column("name", .text) + } + + try db.create(table: "country") { t in + t.column("name", .text) + } + + try db.create(table: "teams") { t in + t.column("name", .text) + } + + try db.create(table: "people") { t in + t.column("name", .text) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Modified case of table name + t.belongsTo("COUNTRY", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Singularized table name + t.belongsTo("team", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Raw plural table name + t.belongsTo("people", onDelete: .cascade, onUpdate: .setNull, deferred: true) + t.column("b") + } + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentId" INTEGER REFERENCES "parent"("rowid") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "COUNTRYId" INTEGER REFERENCES "COUNTRY"("rowid") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "teamId" INTEGER REFERENCES "teams"("rowid") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "peopleId" INTEGER REFERENCES "people"("rowid") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "b") + """, + """ + CREATE INDEX "child_on_parentId" ON "child"("parentId") + """, + """ + CREATE INDEX "child_on_COUNTRYId" ON "child"("COUNTRYId") + """, + """ + CREATE INDEX "child_on_teamId" ON "child"("teamId") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_hiddenRowID_autoreference_singular() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "employee") { t in + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employee") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employee" (\ + "a", \ + "employeeId" INTEGER REFERENCES "employee"("rowid"), \ + "customId" INTEGER REFERENCES "employee"("rowid"), \ + "b"\ + ) + """, + """ + CREATE INDEX "employee_on_employeeId" ON "employee"("employeeId") + """, + """ + CREATE INDEX "employee_on_customId" ON "employee"("customId") + """ + ]) + } + } + + func testTable_belongsTo_hiddenRowID_autoreference_plural() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "employees") { t in + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employees") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employees" (\ + "a", \ + "employeeId" INTEGER REFERENCES 
"employees"("rowid"), \ + "customId" INTEGER REFERENCES "employees"("rowid"), \ + "b"\ + ) + """, + """ + CREATE INDEX "employees_on_employeeId" ON "employees"("employeeId") + """, + """ + CREATE INDEX "employees_on_customId" ON "employees"("customId") + """ + ]) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_plain() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name + t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + // Custom names + t.belongsTo("customParent", inTable: "parent") + t.belongsTo("customCountry", inTable: "country") + t.belongsTo("customTeam", inTable: "teams") + t.belongsTo("customPerson", inTable: "people") + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(9), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentPrimaryKey" TEXT REFERENCES "parent"("primaryKey"), \ + "COUNTRYCode" CUSTOM TYPE REFERENCES "COUNTRY"("code"), \ + "teamPrimaryKey" INTEGER REFERENCES "teams"("primaryKey"), \ + "peopleId" INTEGER REFERENCES "people"("id"), \ + "customParentPrimaryKey" TEXT REFERENCES "parent"("primaryKey"), \ + "customCountryCode" CUSTOM TYPE REFERENCES "country"("code"), \ + "customTeamPrimaryKey" INTEGER REFERENCES "teams"("primaryKey"), \ + "customPersonId" INTEGER REFERENCES "people"("id"), \ + "e"\ + ) + """, + """ + CREATE INDEX "child_on_parentPrimaryKey" ON "child"("parentPrimaryKey") + """, + """ + CREATE INDEX "child_on_COUNTRYCode" ON "child"("COUNTRYCode") + """, + """ + CREATE INDEX "child_on_teamPrimaryKey" ON "child"("teamPrimaryKey") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + """ + CREATE INDEX "child_on_customParentPrimaryKey" ON "child"("customParentPrimaryKey") + """, + """ + CREATE INDEX "child_on_customCountryCode" ON "child"("customCountryCode") + """, + """ + CREATE INDEX "child_on_customTeamPrimaryKey" ON "child"("customTeamPrimaryKey") + """, + """ + CREATE INDEX "child_on_customPersonId" ON "child"("customPersonId") + """, + ]) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_ifNotExists() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child", options: .ifNotExists) { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name + t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE IF NOT EXISTS "child" (\ + "a", \ 
+ "parentPrimaryKey" TEXT REFERENCES "parent"("primaryKey"), \ + "COUNTRYCode" CUSTOM TYPE REFERENCES "COUNTRY"("code"), \ + "teamPrimaryKey" INTEGER REFERENCES "teams"("primaryKey"), \ + "peopleId" INTEGER REFERENCES "people"("id"), \ + "e"\ + ) + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_parentPrimaryKey" ON "child"("parentPrimaryKey") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_COUNTRYCode" ON "child"("COUNTRYCode") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_teamPrimaryKey" ON "child"("teamPrimaryKey") + """, + """ + CREATE INDEX IF NOT EXISTS "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_unique() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").unique() + // Modified case of table name + t.belongsTo("COUNTRY").unique() + // Singularized table name + t.belongsTo("team").unique() + // Raw plural table name + t.belongsTo("people").unique() + t.column("e") + } + + XCTAssertEqual(lastSQLQuery, """ + CREATE TABLE "child" (\ + "a", \ + "parentPrimaryKey" TEXT UNIQUE REFERENCES "parent"("primaryKey"), \ + "COUNTRYCode" CUSTOM TYPE UNIQUE REFERENCES "COUNTRY"("code"), \ + "teamPrimaryKey" INTEGER UNIQUE REFERENCES "teams"("primaryKey"), \ + "peopleId" INTEGER UNIQUE REFERENCES "people"("id"), \ + "e"\ + ) + """) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_notIndexed() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", indexed: false) + // Modified case of table name + t.belongsTo("COUNTRY", indexed: false) + // Singularized table name + t.belongsTo("team", indexed: false) + // Raw plural table name + t.belongsTo("people", indexed: false) + t.column("e") + } + + XCTAssertEqual(lastSQLQuery, """ + CREATE TABLE "child" (\ + "a", \ + "parentPrimaryKey" TEXT REFERENCES "parent"("primaryKey"), \ + "COUNTRYCode" CUSTOM TYPE REFERENCES "COUNTRY"("code"), \ + "teamPrimaryKey" INTEGER REFERENCES "teams"("primaryKey"), \ + "peopleId" INTEGER REFERENCES "people"("id"), \ + "e"\ + ) + """) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_notNull() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural 
table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").notNull() + // Modified case of table name + t.belongsTo("COUNTRY").notNull() + // Singularized table name + t.belongsTo("team").notNull() + // Raw plural table name + t.belongsTo("people").notNull() + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentPrimaryKey" TEXT NOT NULL REFERENCES "parent"("primaryKey"), \ + "COUNTRYCode" CUSTOM TYPE NOT NULL REFERENCES "COUNTRY"("code"), \ + "teamPrimaryKey" INTEGER NOT NULL REFERENCES "teams"("primaryKey"), \ + "peopleId" INTEGER NOT NULL REFERENCES "people"("id"), \ + "e"\ + ) + """, + """ + CREATE INDEX "child_on_parentPrimaryKey" ON "child"("parentPrimaryKey") + """, + """ + CREATE INDEX "child_on_COUNTRYCode" ON "child"("COUNTRYCode") + """, + """ + CREATE INDEX "child_on_teamPrimaryKey" ON "child"("teamPrimaryKey") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_foreignKeyOptions() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.primaryKey("primaryKey", .text) + } + + // Custom type + try db.create(table: "country") { t in + t.primaryKey("code", .init(rawValue: "CUSTOM TYPE")) + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey("primaryKey", .integer) + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey("id", .integer) + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Modified case of table name + t.belongsTo("COUNTRY", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Singularized table name + t.belongsTo("team", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Raw plural table name + t.belongsTo("people", onDelete: .cascade, onUpdate: .setNull, deferred: true) + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentPrimaryKey" TEXT REFERENCES "parent"("primaryKey") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "COUNTRYCode" CUSTOM TYPE REFERENCES "COUNTRY"("code") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "teamPrimaryKey" INTEGER REFERENCES "teams"("primaryKey") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "peopleId" INTEGER REFERENCES "people"("id") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + "e"\ + ) + """, + """ + CREATE INDEX "child_on_parentPrimaryKey" ON "child"("parentPrimaryKey") + """, + """ + CREATE INDEX "child_on_COUNTRYCode" ON "child"("COUNTRYCode") + """, + """ + CREATE INDEX "child_on_teamPrimaryKey" ON "child"("teamPrimaryKey") + """, + """ + CREATE INDEX "child_on_peopleId" ON "child"("peopleId") + """, + ]) + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_autoreference_singular() throws { + try makeDatabaseQueue().inDatabase { db in + do { + sqlQueries.removeAll() + try db.create(table: "employee") { t in + t.autoIncrementedPrimaryKey("id") + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employee") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employee" (\ + "id" INTEGER 
PRIMARY KEY AUTOINCREMENT, \ + "a", \ + "employeeId" INTEGER REFERENCES "employee"("id"), \ + "customId" INTEGER REFERENCES "employee"("id"), \ + "b"\ + ) + """, + """ + CREATE INDEX "employee_on_employeeId" ON "employee"("employeeId") + """, + """ + CREATE INDEX "employee_on_customId" ON "employee"("customId") + """ + ]) + } + + do { + sqlQueries.removeAll() + try db.create(table: "node") { t in + t.primaryKey { t.column("code") } + t.column("a") + t.belongsTo("node") + t.belongsTo("custom", inTable: "node") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "node" (\ + "code" NOT NULL, \ + "a", \ + "nodeCode" REFERENCES "node"("code"), \ + "customCode" REFERENCES "node"("code"), \ + "b", \ + PRIMARY KEY ("code")\ + ) + """, + """ + CREATE INDEX "node_on_nodeCode" ON "node"("nodeCode") + """, + """ + CREATE INDEX "node_on_customCode" ON "node"("customCode") + """ + ]) + } + } + } + + func testTable_belongsTo_singleColumnPrimaryKey_autoreference_plural() throws { + try makeDatabaseQueue().inDatabase { db in + do { + sqlQueries.removeAll() + try db.create(table: "employees") { t in + t.autoIncrementedPrimaryKey("id") + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employees") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employees" (\ + "id" INTEGER PRIMARY KEY AUTOINCREMENT, \ + "a", \ + "employeeId" INTEGER REFERENCES "employees"("id"), \ + "customId" INTEGER REFERENCES "employees"("id"), \ + "b"\ + ) + """, + """ + CREATE INDEX "employees_on_employeeId" ON "employees"("employeeId") + """, + """ + CREATE INDEX "employees_on_customId" ON "employees"("customId") + """ + ]) + } + + do { + sqlQueries.removeAll() + try db.create(table: "nodes") { t in + t.primaryKey { t.column("code") } + t.column("a") + t.belongsTo("node") + t.belongsTo("custom", inTable: "nodes") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "nodes" (\ + "code" NOT NULL, \ + "a", \ + "nodeCode" REFERENCES "nodes"("code"), \ + "customCode" REFERENCES "nodes"("code"), \ + "b", \ + PRIMARY KEY ("code")\ + ) + """, + """ + CREATE INDEX "nodes_on_nodeCode" ON "nodes"("nodeCode") + """, + """ + CREATE INDEX "nodes_on_customCode" ON "nodes"("customCode") + """ + ]) + } + } + } + + func testTable_belongsTo_compositePrimaryKey_plain() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name + t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + // Custom names + t.belongsTo("customParent", inTable: "parent") + t.belongsTo("customCountry", inTable: "country") + t.belongsTo("customTeam", inTable: "teams") + t.belongsTo("customPerson", inTable: "people") + t.column("e") + } + + 
XCTAssertEqual(sqlQueries.suffix(9), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentA" TEXT, \ + "parentB" CUSTOM TYPE, \ + "parentC", \ + "COUNTRYLeft" TEXT, \ + "COUNTRYRight" INTEGER, \ + "teamTop" TEXT, \ + "teamBottom" INTEGER, \ + "peopleMin" TEXT, \ + "peopleMax" INTEGER, \ + "customParentA" TEXT, \ + "customParentB" CUSTOM TYPE, \ + "customParentC", \ + "customCountryLeft" TEXT, \ + "customCountryRight" INTEGER, \ + "customTeamTop" TEXT, \ + "customTeamBottom" INTEGER, \ + "customPersonMin" TEXT, \ + "customPersonMax" INTEGER, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right"), \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max"), \ + FOREIGN KEY ("customParentA", "customParentB", "customParentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("customCountryLeft", "customCountryRight") REFERENCES "country"("left", "right"), \ + FOREIGN KEY ("customTeamTop", "customTeamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("customPersonMin", "customPersonMax") REFERENCES "people"("min", "max")\ + ) + """, + """ + CREATE INDEX "index_child_on_parentA_parentB_parentC" ON "child"("parentA", "parentB", "parentC") + """, + """ + CREATE INDEX "index_child_on_COUNTRYLeft_COUNTRYRight" ON "child"("COUNTRYLeft", "COUNTRYRight") + """, + """ + CREATE INDEX "index_child_on_teamTop_teamBottom" ON "child"("teamTop", "teamBottom") + """, + """ + CREATE INDEX "index_child_on_peopleMin_peopleMax" ON "child"("peopleMin", "peopleMax") + """, + """ + CREATE INDEX "index_child_on_customParentA_customParentB_customParentC" ON "child"("customParentA", "customParentB", "customParentC") + """, + """ + CREATE INDEX "index_child_on_customCountryLeft_customCountryRight" ON "child"("customCountryLeft", "customCountryRight") + """, + """ + CREATE INDEX "index_child_on_customTeamTop_customTeamBottom" ON "child"("customTeamTop", "customTeamBottom") + """, + """ + CREATE INDEX "index_child_on_customPersonMin_customPersonMax" ON "child"("customPersonMin", "customPersonMax") + """, + ]) + } + } + + func testTable_belongsTo_compositePrimaryKey_ifNotExists() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child", options: .ifNotExists) { t in + t.column("a") + t.belongsTo("parent") + // Modified case of table name + t.belongsTo("COUNTRY") + // Singularized table name + t.belongsTo("team") + // Raw plural table name + t.belongsTo("people") + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE IF NOT EXISTS "child" (\ + "a", \ + "parentA" TEXT, \ + "parentB" CUSTOM TYPE, \ + "parentC", \ + "COUNTRYLeft" TEXT, \ + "COUNTRYRight" INTEGER, \ + "teamTop" TEXT, \ + 
"teamBottom" INTEGER, \ + "peopleMin" TEXT, \ + "peopleMax" INTEGER, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right"), \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max")\ + ) + """, + """ + CREATE INDEX IF NOT EXISTS "index_child_on_parentA_parentB_parentC" ON "child"("parentA", "parentB", "parentC") + """, + """ + CREATE INDEX IF NOT EXISTS "index_child_on_COUNTRYLeft_COUNTRYRight" ON "child"("COUNTRYLeft", "COUNTRYRight") + """, + """ + CREATE INDEX IF NOT EXISTS "index_child_on_teamTop_teamBottom" ON "child"("teamTop", "teamBottom") + """, + """ + CREATE INDEX IF NOT EXISTS "index_child_on_peopleMin_peopleMax" ON "child"("peopleMin", "peopleMax") + """, + ]) + } + } + + func testTable_belongsTo_compositePrimaryKey_unique() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").unique() + // Modified case of table name + t.belongsTo("COUNTRY").unique() + // Singularized table name + t.belongsTo("team").unique() + // Raw plural table name + t.belongsTo("people").unique() + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentA" TEXT, \ + "parentB" CUSTOM TYPE, \ + "parentC", \ + "COUNTRYLeft" TEXT, \ + "COUNTRYRight" INTEGER, \ + "teamTop" TEXT, \ + "teamBottom" INTEGER, \ + "peopleMin" TEXT, \ + "peopleMax" INTEGER, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right"), \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max")\ + ) + """, + """ + CREATE UNIQUE INDEX "index_child_on_parentA_parentB_parentC" ON "child"("parentA", "parentB", "parentC") + """, + """ + CREATE UNIQUE INDEX "index_child_on_COUNTRYLeft_COUNTRYRight" ON "child"("COUNTRYLeft", "COUNTRYRight") + """, + """ + CREATE UNIQUE INDEX "index_child_on_teamTop_teamBottom" ON "child"("teamTop", "teamBottom") + """, + """ + CREATE UNIQUE INDEX "index_child_on_peopleMin_peopleMax" ON "child"("peopleMin", "peopleMax") + """, + ]) + } + } + + func testTable_belongsTo_compositePrimaryKey_notIndexed() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + 
// Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", indexed: false) + // Modified case of table name + t.belongsTo("COUNTRY", indexed: false) + // Singularized table name + t.belongsTo("team", indexed: false) + // Raw plural table name + t.belongsTo("people", indexed: false) + t.column("e") + } + + XCTAssertEqual(lastSQLQuery, """ + CREATE TABLE "child" (\ + "a", \ + "parentA" TEXT, \ + "parentB" CUSTOM TYPE, \ + "parentC", \ + "COUNTRYLeft" TEXT, \ + "COUNTRYRight" INTEGER, \ + "teamTop" TEXT, \ + "teamBottom" INTEGER, \ + "peopleMin" TEXT, \ + "peopleMax" INTEGER, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right"), \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max")\ + ) + """) + } + } + + func testTable_belongsTo_compositePrimaryKey_notNull() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent").notNull() + // Modified case of table name + t.belongsTo("COUNTRY").notNull() + // Singularized table name + t.belongsTo("team").notNull() + // Raw plural table name + t.belongsTo("people").notNull() + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentA" TEXT NOT NULL, \ + "parentB" CUSTOM TYPE NOT NULL, \ + "parentC" NOT NULL, \ + "COUNTRYLeft" TEXT NOT NULL, \ + "COUNTRYRight" INTEGER NOT NULL, \ + "teamTop" TEXT NOT NULL, \ + "teamBottom" INTEGER NOT NULL, \ + "peopleMin" TEXT NOT NULL, \ + "peopleMax" INTEGER NOT NULL, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c"), \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right"), \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom"), \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max")\ + ) + """, + """ + CREATE INDEX "index_child_on_parentA_parentB_parentC" ON "child"("parentA", "parentB", "parentC") + """, + """ + CREATE INDEX "index_child_on_COUNTRYLeft_COUNTRYRight" ON "child"("COUNTRYLeft", "COUNTRYRight") + """, + """ + CREATE INDEX "index_child_on_teamTop_teamBottom" ON "child"("teamTop", "teamBottom") + """, + """ + CREATE INDEX "index_child_on_peopleMin_peopleMax" ON "child"("peopleMin", "peopleMax") + """, + ]) + } + } + + func testTable_belongsTo_compositePrimaryKey_foreignKeyOptions() 
throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "parent") { t in + t.column("a", .text) + t.column("b", .init(rawValue: "CUSTOM TYPE")) // Custom type + t.column("c") // No declared type + t.primaryKey(["a", "b", "c"]) + } + + try db.create(table: "country") { t in + t.primaryKey { + t.column("left", .text) + t.column("right", .integer) + } + } + + // Plural table + try db.create(table: "teams") { t in + t.primaryKey { + t.column("top", .text) + t.column("bottom", .integer) + } + } + + // Plural table + try db.create(table: "people") { t in + t.primaryKey { + t.column("min", .text) + t.column("max", .integer) + } + } + + sqlQueries.removeAll() + try db.create(table: "child") { t in + t.column("a") + t.belongsTo("parent", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Modified case of table name + t.belongsTo("COUNTRY", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Singularized table name + t.belongsTo("team", onDelete: .cascade, onUpdate: .setNull, deferred: true) + // Raw plural table name + t.belongsTo("people", onDelete: .cascade, onUpdate: .setNull, deferred: true) + t.column("e") + } + + XCTAssertEqual(sqlQueries.suffix(5), [ + """ + CREATE TABLE "child" (\ + "a", \ + "parentA" TEXT, \ + "parentB" CUSTOM TYPE, \ + "parentC", \ + "COUNTRYLeft" TEXT, \ + "COUNTRYRight" INTEGER, \ + "teamTop" TEXT, \ + "teamBottom" INTEGER, \ + "peopleMin" TEXT, \ + "peopleMax" INTEGER, \ + "e", \ + FOREIGN KEY ("parentA", "parentB", "parentC") REFERENCES "parent"("a", "b", "c") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + FOREIGN KEY ("COUNTRYLeft", "COUNTRYRight") REFERENCES "COUNTRY"("left", "right") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + FOREIGN KEY ("teamTop", "teamBottom") REFERENCES "teams"("top", "bottom") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED, \ + FOREIGN KEY ("peopleMin", "peopleMax") REFERENCES "people"("min", "max") ON DELETE CASCADE ON UPDATE SET NULL DEFERRABLE INITIALLY DEFERRED\ + ) + """, + """ + CREATE INDEX "index_child_on_parentA_parentB_parentC" ON "child"("parentA", "parentB", "parentC") + """, + """ + CREATE INDEX "index_child_on_COUNTRYLeft_COUNTRYRight" ON "child"("COUNTRYLeft", "COUNTRYRight") + """, + """ + CREATE INDEX "index_child_on_teamTop_teamBottom" ON "child"("teamTop", "teamBottom") + """, + """ + CREATE INDEX "index_child_on_peopleMin_peopleMax" ON "child"("peopleMin", "peopleMax") + """, + ]) + } + } + + func testTable_belongsTo_compositePrimaryKey_autoreference_singular() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "employee") { t in + t.primaryKey { + t.column("left") + t.column("right") + } + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employee") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employee" (\ + "left" NOT NULL, \ + "right" NOT NULL, \ + "a", \ + "employeeLeft", \ + "employeeRight", \ + "customLeft", \ + "customRight", \ + "b", \ + PRIMARY KEY ("left", "right"), \ + FOREIGN KEY ("employeeLeft", "employeeRight") REFERENCES "employee"("left", "right"), \ + FOREIGN KEY ("customLeft", "customRight") REFERENCES "employee"("left", "right")\ + ) + """, + """ + CREATE INDEX "index_employee_on_employeeLeft_employeeRight" ON "employee"("employeeLeft", "employeeRight") + """, + """ + CREATE INDEX "index_employee_on_customLeft_customRight" ON "employee"("customLeft", "customRight") + """ + ]) + 
} + } + + func testTable_belongsTo_compositePrimaryKey_autoreference_plural() throws { + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "employees") { t in + t.primaryKey { + t.column("left") + t.column("right") + } + t.column("a") + t.belongsTo("employee") + t.belongsTo("custom", inTable: "employees") + t.column("b") + } + + XCTAssertEqual(sqlQueries.suffix(3), [ + """ + CREATE TABLE "employees" (\ + "left" NOT NULL, \ + "right" NOT NULL, \ + "a", \ + "employeeLeft", \ + "employeeRight", \ + "customLeft", \ + "customRight", \ + "b", \ + PRIMARY KEY ("left", "right"), \ + FOREIGN KEY ("employeeLeft", "employeeRight") REFERENCES "employees"("left", "right"), \ + FOREIGN KEY ("customLeft", "customRight") \ + REFERENCES "employees"("left", "right")\ + ) + """, + """ + CREATE INDEX "index_employees_on_employeeLeft_employeeRight" ON "employees"("employeeLeft", "employeeRight") + """, + """ + CREATE INDEX "index_employees_on_customLeft_customRight" ON "employees"("customLeft", "customRight") + """ + ]) + } + } + + func testTable_belongsTo_as_primary_key() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "composite") { t in + t.primaryKey { + t.column("a", .text) + t.column("b", .text) + } + } + try db.create(table: "simple") { t in + t.autoIncrementedPrimaryKey("id") + } + + do { + try db.create(table: "compositeChild") { t in + t.primaryKey { + t.belongsTo("composite") + } + } + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "compositeChild" (\ + "compositeA" TEXT NOT NULL, \ + "compositeB" TEXT NOT NULL, \ + PRIMARY KEY ("compositeA", "compositeB"), \ + FOREIGN KEY ("compositeA", "compositeB") REFERENCES "composite"("a", "b")\ + ) + """) + } + + do { + try db.create(table: "simpleChild") { t in + t.primaryKey { + t.belongsTo("simple") + } + } + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "simpleChild" (\ + "simpleId" INTEGER NOT NULL REFERENCES "simple"("id"), \ + PRIMARY KEY ("simpleId")\ + ) + """) + } + + do { + try db.create(table: "complex") { t in + t.primaryKey { + t.column("a") + t.belongsTo("composite") + t.belongsTo("simple") + t.column("b") + } + } + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "complex" (\ + "a" NOT NULL, \ + "compositeA" TEXT NOT NULL, \ + "compositeB" TEXT NOT NULL, \ + "simpleId" INTEGER NOT NULL REFERENCES "simple"("id"), \ + "b" NOT NULL, \ + PRIMARY KEY ("a", "compositeA", "compositeB", "simpleId", "b"), \ + FOREIGN KEY ("compositeA", "compositeB") REFERENCES "composite"("a", "b")\ + ) + """) + } + } + } + + func testTable_invalid_belongsTo_as_primary_key() throws { + try makeDatabaseQueue().inDatabase { db in + do { + // Invalid circular definition + try db.create(table: "player") { t in + t.primaryKey { + t.belongsTo("player") + } + } + XCTFail("Expected error") + } catch { } + } + } +} diff --git a/Tests/GRDBTests/ForeignKeyInfoTests.swift b/Tests/GRDBTests/ForeignKeyInfoTests.swift index 4ecb4d5d4b..e425ccc329 100644 --- a/Tests/GRDBTests/ForeignKeyInfoTests.swift +++ b/Tests/GRDBTests/ForeignKeyInfoTests.swift @@ -12,6 +12,8 @@ class ForeignKeyInfoTests: GRDBTestCase { } } + // MARK: Foreign key info + func testForeignKeys() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -68,6 +70,120 @@ class ForeignKeyInfoTests: GRDBTestCase { } } + func testUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE parents1 (id PRIMARY KEY)") + try db.execute(sql: 
"CREATE TABLE children1 (parentId REFERENCES parents1)") + do { + _ = try db.foreignKeys(on: "children1", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents1 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children1 (parentId REFERENCES parents1)") + + do { + let foreignKeys = try db.foreignKeys(on: "children1", in: "main") + XCTAssertEqual(foreignKeys.count, 1) + assertEqual(foreignKeys[0], ForeignKeyInfo(id: foreignKeys[0].id, destinationTable: "parents1", mapping: [(origin: "parentId", destination: "id")])) + } + } + } + + func testSpecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents2 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children (parentId REFERENCES parents2)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents1 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children (parentId REFERENCES parents1)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let foreignKeys = try db.foreignKeys(on: "children", in: "attached") + XCTAssertEqual(foreignKeys.count, 1) + assertEqual(foreignKeys[0], ForeignKeyInfo(id: foreignKeys[0].id, destinationTable: "parents2", mapping: [(origin: "parentId", destination: "id")])) + } + } + } + + // The `children` table in the attached database should not + // be found unless explicitly specified as it is after + // `main.children` in resolution order. 
+ func testUnspecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents2 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children (parentId REFERENCES parents2)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents1 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children (parentId REFERENCES parents1)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let foreignKeys = try db.foreignKeys(on: "children") + XCTAssertEqual(foreignKeys.count, 1) + assertEqual(foreignKeys[0], ForeignKeyInfo(id: foreignKeys[0].id, destinationTable: "parents1", mapping: [(origin: "parentId", destination: "id")])) + } + } + } + + func testUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents2 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children2 (parentId REFERENCES parents2)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE parents1 (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE children1 (parentId REFERENCES parents1)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let foreignKeys = try db.foreignKeys(on: "children2") + XCTAssertEqual(foreignKeys.count, 1) + assertEqual(foreignKeys[0], ForeignKeyInfo(id: foreignKeys[0].id, destinationTable: "parents2", mapping: [(origin: "parentId", destination: "id")])) + } + } + } + + + // MARK: Foreign key violations + func testForeignKeyViolations() throws { try makeDatabaseQueue().writeWithoutTransaction { db in try db.execute(sql: """ @@ -221,4 +337,186 @@ class ForeignKeyInfoTests: GRDBTestCase { } } } + + func testForeignKeyViolationsUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE parent (id PRIMARY KEY)") + try db.execute(sql: "CREATE TABLE child (parentId REFERENCES parent)") + do { + _ = try db.foreignKeyViolations(in: "child", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + + do { + _ = try db.checkForeignKeys(in: "child", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testForeignKeyViolationsMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.writeWithoutTransaction { db in + try db.execute(sql: """ + CREATE TABLE parent(id 
TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (13, '1'); + """) + do { + let violations = try Array(db.foreignKeyViolations(in: "child", in: "main")) + XCTAssertEqual(violations.count, 1) + } + do { + _ = try db.checkForeignKeys(in: "child", in: "main") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY, error.extendedResultCode) + } + } + } + + func testForeignKeyViolationsInSpecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE parent(id TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (20, '1'); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE parent(id TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (10, '1'); + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let violations = try Array(try db.foreignKeyViolations(in: "child", in: "attached")) + XCTAssertEqual(violations.count, 1) + if let violation = violations.first(where: { $0.originRowID == 20 }) { + XCTAssertEqual(violation.originTable, "child") + XCTAssertEqual(violation.destinationTable, "parent") + } else { + XCTFail("Missing violation") + } + } + + do { + _ = try db.checkForeignKeys(in: "child", in: "attached") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY, error.extendedResultCode) + } + } + } + + // The `child` table in the attached database should not + // be found unless explicitly specified as it is after + // `main.child` in resolution order. 
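As in the lookup tests above, the schema argument decides which `child` table gets inspected. Under the hood this plausibly maps onto SQLite's `foreign_key_check` pragma, which accepts an optional schema qualifier and defaults to `main`; treating that as an implementation assumption rather than something stated in the diff, a rough sketch of the raw equivalent:

```swift
import GRDB

func rawViolationSketch(_ db: Database) throws {
    // Checks main.child only (unqualified pragmas default to the main schema):
    let mainViolations = try Row.fetchAll(db, sql: "PRAGMA foreign_key_check(child)")

    // Checks attached.child only:
    let attachedViolations = try Row.fetchAll(db, sql: "PRAGMA attached.foreign_key_check(child)")

    print(mainViolations.count, attachedViolations.count)
}
```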
+ func testForeignKeyViolationsInUnspecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE parent(id TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (20, '1'); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE parent(id TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (10, '1'); + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let violations = try Array(try db.foreignKeyViolations(in: "child")) + XCTAssertEqual(violations.count, 1) + if let violation = violations.first(where: { $0.originRowID == 10 }) { + XCTAssertEqual(violation.originTable, "child") + XCTAssertEqual(violation.destinationTable, "parent") + } else { + XCTFail("Missing violation") + } + } + + do { + _ = try db.checkForeignKeys(in: "child") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY, error.extendedResultCode) + } + } + } + + func testForeignKeyViolationsInUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE parent(id TEXT NOT NULL PRIMARY KEY); + CREATE TABLE child(id INTEGER NOT NULL PRIMARY KEY, parentId TEXT REFERENCES parent(id)); + PRAGMA foreign_keys = OFF; + INSERT INTO child (id, parentId) VALUES (20, '1'); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + do { + let violations = try Array(try db.foreignKeyViolations(in: "child")) + XCTAssertEqual(violations.count, 1) + if let violation = violations.first(where: { $0.originRowID == 20 }) { + XCTAssertEqual(violation.originTable, "child") + XCTAssertEqual(violation.destinationTable, "parent") + } else { + XCTFail("Missing violation") + } + } + + do { + _ = try db.checkForeignKeys(in: "child") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY, error.extendedResultCode) + } + } + } } diff --git a/Tests/GRDBTests/FoundationNSUUIDTests.swift b/Tests/GRDBTests/FoundationNSUUIDTests.swift index 0aaf75e2f7..cc7243ce86 100644 --- a/Tests/GRDBTests/FoundationNSUUIDTests.swift +++ b/Tests/GRDBTests/FoundationNSUUIDTests.swift @@ -2,9 +2,9 @@ import XCTest import GRDB class FoundationNSUUIDTests: GRDBTestCase { - private func assert(_ value: DatabaseValueConvertible?, isDecodedAs expectedUUID: NSUUID?) 
throws { + private func assert(_ value: (any DatabaseValueConvertible)?, isDecodedAs expectedUUID: NSUUID?) throws { try makeDatabaseQueue().read { db in - if let expectedUUID = expectedUUID { + if let expectedUUID { let decodedUUID = try NSUUID.fetchOne(db, sql: "SELECT ?", arguments: [value]) XCTAssertEqual(decodedUUID, expectedUUID) } else if value == nil { diff --git a/Tests/GRDBTests/FoundationUUIDTests.swift b/Tests/GRDBTests/FoundationUUIDTests.swift index d5f05ab303..9adfc228a4 100644 --- a/Tests/GRDBTests/FoundationUUIDTests.swift +++ b/Tests/GRDBTests/FoundationUUIDTests.swift @@ -3,9 +3,9 @@ import Foundation import GRDB class FoundationUUIDTests: GRDBTestCase { - private func assert(_ value: DatabaseValueConvertible?, isDecodedAs expectedUUID: UUID?) throws { + private func assert(_ value: (any DatabaseValueConvertible)?, isDecodedAs expectedUUID: UUID?) throws { try makeDatabaseQueue().read { db in - if let expectedUUID = expectedUUID { + if let expectedUUID { let decodedUUID = try UUID.fetchOne(db, sql: "SELECT ?", arguments: [value]) XCTAssertEqual(decodedUUID, expectedUUID) } else if value == nil { diff --git a/Tests/GRDBTests/GRDBTestCase.swift b/Tests/GRDBTests/GRDBTestCase.swift index 6ea977a0d0..9028d491f4 100644 --- a/Tests/GRDBTests/GRDBTestCase.swift +++ b/Tests/GRDBTests/GRDBTestCase.swift @@ -147,19 +147,50 @@ class GRDBTestCase: XCTestCase { } // Compare SQL strings (ignoring leading and trailing white space and semicolons). - func assertEqualSQL(_ db: Database, _ request: Request, _ sql: String, file: StaticString = #file, line: UInt = #line) throws { + func assertEqualSQL( + _ db: Database, + _ request: some FetchRequest, + _ sql: String, + file: StaticString = #file, + line: UInt = #line) + throws + { try request.makeStatement(db).makeCursor().next() assertEqualSQL(lastSQLQuery!, sql, file: file, line: line) } + // Compare SQL strings. + func assertEqualSQL( + _ db: Database, + _ expression: some SQLExpressible, + _ sql: String, + file: StaticString = #file, + line: UInt = #line) + throws + { + let request: SQLRequest = "SELECT \(expression)" + try assertEqualSQL(db, request, "SELECT \(sql)", file: file, line: line) + } + // Compare SQL strings (ignoring leading and trailing white space and semicolons). - func assertEqualSQL(_ databaseReader: DatabaseReader, _ request: Request, _ sql: String, file: StaticString = #file, line: UInt = #line) throws { + func assertEqualSQL( + _ databaseReader: some DatabaseReader, + _ request: some FetchRequest, + _ sql: String, + file: StaticString = #file, + line: UInt = #line) + throws + { + try databaseReader.unsafeRead { db in + try assertEqualSQL(db, request, sql, file: file, line: line) + } + } - - func sql(_ databaseReader: DatabaseReader, _ request: Request) -> String { + + func sql( + _ databaseReader: some DatabaseReader, + _ request: some FetchRequest) + -> String + { try! databaseReader.unsafeRead { db in try request.makeStatement(db).makeCursor().next() return lastSQLQuery!
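
The expression overload of `assertEqualSQL` added above wraps both the expression and the expected SQL in a `SELECT`, so a column-free expression can be checked without creating any table. A minimal usage sketch (hypothetical test name and body, built only on the helper above):

    func test_expression_SQL_sketch() throws {
        try makeDatabaseQueue().inDatabase { db in
            // GRDB inlines the constant, so the generated SQL is "SELECT 1".
            try assertEqualSQL(db, 1.databaseValue, "1")
        }
    }
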
diff --git a/Tests/GRDBTests/IndexInfoTests.swift b/Tests/GRDBTests/IndexInfoTests.swift index d79ffa1eb6..9ce46a36f5 100644 --- a/Tests/GRDBTests/IndexInfoTests.swift +++ b/Tests/GRDBTests/IndexInfoTests.swift @@ -93,4 +93,119 @@ class IndexInfoTests: GRDBTestCase { } } } + + func testUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndex ON player (name); + """) + do { + _ = try db.indexes(on: "player", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndex ON player (name); + """) + let indexes = try db.indexes(on: "player", in: "main") + XCTAssertEqual(indexes.count, 1) + XCTAssertEqual(indexes[0].name, "columnIndex") + } + } + + func testSpecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndexAttached ON player (name); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndex ON player (name); + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + let mainIndexes = try db.indexes(on: "player", in: "main") + XCTAssertEqual(mainIndexes.count, 1) + XCTAssertEqual(mainIndexes[0].name, "columnIndex") + + let attachedIndexes = try db.indexes(on: "player", in: "attached") + XCTAssertEqual(attachedIndexes.count, 1) + XCTAssertEqual(attachedIndexes[0].name, "columnIndexAttached") + } + } + + // The `player` table in the attached database should never + // be found unless explicitly specified as it is after + // `main.player` in resolution order. 
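+ // A sketch of the same rule with the index introspection API used in
+ // these tests (illustration only):
+ //
+ //     _ = try db.indexes(on: "player")                  // finds main.player
+ //     _ = try db.indexes(on: "player", in: "attached")  // explicit schema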
+ func testUnspecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndexAttached ON player (name); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndex ON player (name); + """) + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + let mainIndexes = try db.indexes(on: "player") + XCTAssertEqual(mainIndexes.count, 1) + XCTAssertEqual(mainIndexes[0].name, "columnIndex") + } + } + + func testUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: """ + CREATE TABLE player (id INTEGER PRIMARY KEY, name TEXT); + CREATE INDEX columnIndexAttached ON player (name); + """) + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + let attachedIndexes = try db.indexes(on: "player", in: "attached") + XCTAssertEqual(attachedIndexes.count, 1) + XCTAssertEqual(attachedIndexes[0].name, "columnIndexAttached") + } + } } diff --git a/Tests/GRDBTests/Issue1383.sqlite b/Tests/GRDBTests/Issue1383.sqlite new file mode 100644 index 0000000000..9b05e30b30 Binary files /dev/null and b/Tests/GRDBTests/Issue1383.sqlite differ diff --git a/Tests/GRDBTests/JSONColumnTests.swift b/Tests/GRDBTests/JSONColumnTests.swift new file mode 100644 index 0000000000..b73c21128d --- /dev/null +++ b/Tests/GRDBTests/JSONColumnTests.swift @@ -0,0 +1,118 @@ +import XCTest +import GRDB + +final class JSONColumnTests: GRDBTestCase { + func test_JSONColumn_derived_from_CodingKey() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + struct Player: Codable, TableRecord, FetchableRecord, PersistableRecord { + var id: Int64 + var info: Data + + enum CodingKeys: String, CodingKey { + case id + case info = "info_json" + } + + enum Columns { + static let id = Column(CodingKeys.id) + static let info = JSONColumn(CodingKeys.info) + } + + static let databaseSelection: [any SQLSelectable] = [Columns.id, Columns.info] + } + + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("info_json", .jsonText) + } + + try assertEqualSQL(db, Player.all(), """ + SELECT "id", "info_json" FROM "player" + """) + } + } + + func test_JSON_EXTRACT() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { 
+ throw XCTSkip("JSON_EXTRACT is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON_EXTRACT is not available") + } +#endif + + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("info", .jsonText) + } + + let player = Table("player") + let info = JSONColumn("info") + + try assertEqualSQL(db, player.select(info.jsonExtract(atPath: "$.score")), """ + SELECT JSON_EXTRACT("info", '$.score') FROM "player" + """) + + try assertEqualSQL(db, player.select(info.jsonExtract(atPaths: ["$.score", "$.bonus"])), """ + SELECT JSON_EXTRACT("info", '$.score', '$.bonus') FROM "player" + """) + } + } + + func test_extraction_operators() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON operators are not available") + } +#else + guard #available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON operators are not available") + } +#endif + + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("info", .jsonText) + } + + let player = Table("player") + let info = JSONColumn("info") + + try assertEqualSQL(db, player.select(info["score"]), """ + SELECT "info" ->> 'score' FROM "player" + """) + + try assertEqualSQL(db, player.select(info["$.score"]), """ + SELECT "info" ->> '$.score' FROM "player" + """) + + try assertEqualSQL(db, player.select(info.jsonRepresentation(atPath: "score")), """ + SELECT "info" -> 'score' FROM "player" + """) + + try assertEqualSQL(db, player.select(info.jsonRepresentation(atPath: "$.score")), """ + SELECT "info" -> '$.score' FROM "player" + """) + } + } +} diff --git a/Tests/GRDBTests/JSONExpressionsTests.swift b/Tests/GRDBTests/JSONExpressionsTests.swift new file mode 100644 index 0000000000..f0b4390927 --- /dev/null +++ b/Tests/GRDBTests/JSONExpressionsTests.swift @@ -0,0 +1,1435 @@ +import XCTest +import GRDB + +final class JSONExpressionsTests: GRDBTestCase { + func test_Database_json() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.json(#" { "a": [ "test" ] } "#), """ + JSON(' { "a": [ "test" ] } ') + """) + + try assertEqualSQL(db, player.select(Database.json(nameColumn)), """ + SELECT JSON("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.json(infoColumn)), """ + SELECT JSON("info") FROM "player" + """) + } + } + + func test_asJSON() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try 
db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, player.select([ + #"[1, 2, 3]"#.databaseValue.asJSON, + DatabaseValue.null.asJSON, + nameColumn.asJSON, + infoColumn.asJSON, + abs(nameColumn).asJSON, + abs(infoColumn).asJSON, + ]), """ + SELECT \ + '[1, 2, 3]', \ + NULL, \ + "name", \ + "info", \ + ABS("name"), \ + ABS("info") \ + FROM "player" + """) + + try assertEqualSQL(db, player.select([ + Database.jsonArray([ + #"[1, 2, 3]"#.databaseValue.asJSON, + DatabaseValue.null.asJSON, + nameColumn.asJSON, + infoColumn.asJSON, + abs(nameColumn).asJSON, + abs(infoColumn).asJSON, + ]) + ]), """ + SELECT JSON_ARRAY(\ + JSON('[1, 2, 3]'), \ + NULL, \ + JSON("name"), \ + JSON("info"), \ + JSON(ABS("name")), \ + JSON(ABS("info"))\ + ) FROM "player" + """) + } + } + + func test_Database_jsonArray() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonArray(1...4), """ + JSON_ARRAY(1, 2, 3, 4) + """) + + try assertEqualSQL(db, Database.jsonArray([1, 2, 3, 4]), """ + JSON_ARRAY(1, 2, 3, 4) + """) + + try assertEqualSQL(db, Database.jsonArray([1, 2, "3", 4]), """ + JSON_ARRAY(1, 2, '3', 4) + """) + + // Note: this JSON(JSON_EXTRACT(...)) is useful when the extracted value is a string that contains JSON + try assertEqualSQL(db, player + .select( + Database.jsonArray([ + nameColumn, + nameColumn.asJSON, + infoColumn, + infoColumn.jsonExtract(atPath: "address"), + infoColumn.jsonExtract(atPath: "address").asJSON, + ] as [any SQLExpressible]) + ), """ + SELECT JSON_ARRAY(\ + "name", \ + JSON("name"), \ + JSON("info"), \ + JSON_EXTRACT("info", 'address'), \ + JSON(JSON_EXTRACT("info", 'address'))\ + ) FROM "player" + """) + } + } + + func test_Database_jsonArray_from_SQLJSONExpressible() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 13.2, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + // Note: this JSON(JSON_EXTRACT(...)) is useful when the extracted value is a string that contains JSON + try assertEqualSQL(db, player + .select( + Database.jsonArray([ + nameColumn, + nameColumn.asJSON, + infoColumn, + infoColumn["score"], + infoColumn["score"].asJSON, + infoColumn.jsonExtract(atPath: "address"), + infoColumn.jsonExtract(atPath: "address").asJSON, + infoColumn.jsonRepresentation(atPath: "address"), + infoColumn.jsonRepresentation(atPath: "address").asJSON, + ] as [any SQLExpressible]) + ), """ + SELECT JSON_ARRAY(\ + 
"name", \ + JSON("name"), \ + JSON("info"), \ + "info" ->> 'score', \ + JSON("info" ->> 'score'), \ + JSON_EXTRACT("info", 'address'), \ + JSON(JSON_EXTRACT("info", 'address')), \ + "info" -> 'address', \ + "info" -> 'address'\ + ) FROM "player" + """) + + let alias = TableAlias(name: "p") + + try assertEqualSQL(db, player + .aliased(alias) + .select( + alias[ + Database.jsonArray([ + nameColumn, + nameColumn.asJSON, + infoColumn, + infoColumn["score"], + infoColumn.jsonExtract(atPath: "address"), + infoColumn.jsonRepresentation(atPath: "address"), + ] as [any SQLExpressible]) + ] + ), """ + SELECT JSON_ARRAY(\ + "p"."name", \ + JSON("p"."name"), \ + JSON("p"."info"), \ + "p"."info" ->> 'score', \ + JSON_EXTRACT("p"."info", 'address'), \ + "p"."info" -> 'address'\ + ) FROM "player" "p" + """) + + try assertEqualSQL(db, player + .aliased(alias) + .select( + Database.jsonArray([ + alias[nameColumn], + alias[nameColumn.asJSON], + alias[infoColumn], + alias[infoColumn["score"]], + alias[infoColumn.jsonExtract(atPath: "address")], + alias[infoColumn.jsonRepresentation(atPath: "address")], + ] as [any SQLExpressible]) + ), """ + SELECT JSON_ARRAY(\ + "p"."name", \ + JSON("p"."name"), \ + JSON("p"."info"), \ + "p"."info" ->> 'score', \ + JSON_EXTRACT("p"."info", 'address'), \ + "p"."info" -> 'address'\ + ) FROM "player" "p" + """) + + try assertEqualSQL(db, player + .aliased(alias) + .select( + Database.jsonArray([ + alias[nameColumn], + alias[nameColumn].asJSON, + alias[infoColumn], + alias[infoColumn]["score"], + alias[infoColumn].jsonExtract(atPath: "address"), + alias[infoColumn].jsonRepresentation(atPath: "address"), + ] as [any SQLExpressible]) + ), """ + SELECT JSON_ARRAY(\ + "p"."name", \ + JSON("p"."name"), \ + JSON("p"."info"), \ + "p"."info" ->> 'score', \ + JSON_EXTRACT("p"."info", 'address'), \ + "p"."info" -> 'address'\ + ) FROM "player" "p" + """) + } + } + + func test_Database_jsonArrayLength() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonArrayLength("[1,2,3,4]"), """ + JSON_ARRAY_LENGTH('[1,2,3,4]') + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(nameColumn)), """ + SELECT JSON_ARRAY_LENGTH("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(infoColumn)), """ + SELECT JSON_ARRAY_LENGTH("info") FROM "player" + """) + } + } + + func test_Database_jsonArrayLength_atPath() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, 
Database.jsonArrayLength(#"{"one":[1,2,3]}"#, atPath: "$.one"), """ + JSON_ARRAY_LENGTH('{"one":[1,2,3]}', '$.one') + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(nameColumn, atPath: "$.a")), """ + SELECT JSON_ARRAY_LENGTH("name", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(#"{"one":[1,2,3]}"#, atPath: nameColumn)), """ + SELECT JSON_ARRAY_LENGTH('{"one":[1,2,3]}', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(infoColumn, atPath: "$.a")), """ + SELECT JSON_ARRAY_LENGTH("info", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonArrayLength(#"{"one":[1,2,3]}"#, atPath: infoColumn)), """ + SELECT JSON_ARRAY_LENGTH('{"one":[1,2,3]}', "info") FROM "player" + """) + } + } + + func test_Database_jsonErrorPosition() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3042000 else { + throw XCTSkip("JSON_ERROR_POSITION is not available") + } + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonErrorPosition(#" { "a": [ "test" ] } "#), """ + JSON_ERROR_POSITION(' { "a": [ "test" ] } ') + """) + + try assertEqualSQL(db, player.select(Database.jsonErrorPosition(nameColumn)), """ + SELECT JSON_ERROR_POSITION("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonErrorPosition(infoColumn)), """ + SELECT JSON_ERROR_POSITION("info") FROM "player" + """) + } +#else + throw XCTSkip("JSON_ERROR_POSITION is not available") +#endif + } + + func test_Database_jsonExtract_atPath() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonExtract(#"{"a":123}"#, atPath: "$.a"), """ + JSON_EXTRACT('{"a":123}', '$.a') + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(nameColumn, atPath: "$.a")), """ + SELECT JSON_EXTRACT("name", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(infoColumn, atPath: "$.a")), """ + SELECT JSON_EXTRACT("info", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(#"{"a":123}"#, atPath: nameColumn)), """ + SELECT JSON_EXTRACT('{"a":123}', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(#"{"a":123}"#, atPath: infoColumn)), """ + SELECT JSON_EXTRACT('{"a":123}', "info") FROM "player" + """) + } + } + + func test_Database_jsonExtract_atPaths() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not
available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonExtract(#"{"a":2,"c":[4,5]}"#, atPaths: ["$.c", "$.a"]), """ + JSON_EXTRACT('{"a":2,"c":[4,5]}', '$.c', '$.a') + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(nameColumn, atPaths: ["$.c", "$.a"])), """ + SELECT JSON_EXTRACT("name", '$.c', '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonExtract(infoColumn, atPaths: ["$.c", "$.a"])), """ + SELECT JSON_EXTRACT("info", '$.c', '$.a') FROM "player" + """) + } + } + + func test_Database_jsonInsert() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonInsert("[1,2,3,4]", ["$[#]": #"{"e":5}"#]), """ + JSON_INSERT('[1,2,3,4]', '$[#]', '{"e":5}') + """) + + try assertEqualSQL(db, Database.jsonInsert("[1,2,3,4]", ["$[#]": #"{"e":5}"#.databaseValue.asJSON]), """ + JSON_INSERT('[1,2,3,4]', '$[#]', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonInsert("[1,2,3,4]", ["$[#]": Database.json(#"{"e":5}"#)]), """ + JSON_INSERT('[1,2,3,4]', '$[#]', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonInsert("[1,2,3,4]", ["$[#]": Database.jsonObject(["e": 5])]), """ + JSON_INSERT('[1,2,3,4]', '$[#]', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL(db, player.select(Database.jsonInsert(nameColumn, ["$[#]": 99])), """ + SELECT JSON_INSERT("name", '$[#]', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonInsert(infoColumn, ["$[#]": 99])), """ + SELECT JSON_INSERT("info", '$[#]', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonInsert("[1,2,3,4]", ["$[#]": nameColumn])), """ + SELECT JSON_INSERT('[1,2,3,4]', '$[#]', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonInsert("[1,2,3,4]", ["$[#]": infoColumn])), """ + SELECT JSON_INSERT('[1,2,3,4]', '$[#]', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonReplace() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": #"{"e":5}"#]), """ + JSON_REPLACE('{"a":2,"c":4}', '$.a', '{"e":5}') + """) + + try assertEqualSQL(db, Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": 
#"{"e":5}"#.databaseValue.asJSON]), """ + JSON_REPLACE('{"a":2,"c":4}', '$.a', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": Database.json(#"{"e":5}"#)]), """ + JSON_REPLACE('{"a":2,"c":4}', '$.a', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": Database.jsonObject(["e": 5])]), """ + JSON_REPLACE('{"a":2,"c":4}', '$.a', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL(db, player.select(Database.jsonReplace(nameColumn, ["$.a": 99])), """ + SELECT JSON_REPLACE("name", '$.a', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonReplace(infoColumn, ["$.a": 99])), """ + SELECT JSON_REPLACE("info", '$.a', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": nameColumn])), """ + SELECT JSON_REPLACE('{"a":2,"c":4}', '$.a', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonReplace(#"{"a":2,"c":4}"#, ["$.a": infoColumn])), """ + SELECT JSON_REPLACE('{"a":2,"c":4}', '$.a', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonSet() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": #"{"e":5}"#]), """ + JSON_SET('{"a":2,"c":4}', '$.a', '{"e":5}') + """) + + try assertEqualSQL(db, Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": #"{"e":5}"#.databaseValue.asJSON]), """ + JSON_SET('{"a":2,"c":4}', '$.a', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": Database.json(#"{"e":5}"#)]), """ + JSON_SET('{"a":2,"c":4}', '$.a', JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": Database.jsonObject(["e": 5])]), """ + JSON_SET('{"a":2,"c":4}', '$.a', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL(db, player.select(Database.jsonSet(nameColumn, ["$.a": 99])), """ + SELECT JSON_SET("name", '$.a', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonSet(infoColumn, ["$.a": 99])), """ + SELECT JSON_SET("info", '$.a', 99) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": nameColumn])), """ + SELECT JSON_SET('{"a":2,"c":4}', '$.a', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonSet(#"{"a":2,"c":4}"#, ["$.a": infoColumn])), """ + SELECT JSON_SET('{"a":2,"c":4}', '$.a', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonObject_from_Dictionary() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + 
t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL( + db, + Database.jsonObject([ + "a": 2, + ] as [String: Int]), """ + JSON_OBJECT('a', 2) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "c": #"{"e":5}"#, + ] as [String: any SQLExpressible]), """ + JSON_OBJECT('c', '{"e":5}') + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "c": #"{"e":5}"#.databaseValue.asJSON, + ] as [String: any SQLExpressible]), """ + JSON_OBJECT('c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "c": Database.jsonObject(["e": 5]), + ]), """ + JSON_OBJECT('c', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "c": Database.json(#"{"e":5}"#), + ]), """ + JSON_OBJECT('c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "a": nameColumn, + ]) + ), """ + SELECT JSON_OBJECT('a', "name") FROM "player" + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "c": infoColumn, + ]) + ), """ + SELECT JSON_OBJECT('c', JSON("info")) FROM "player" + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "a": Database.json(nameColumn), + ]) + ), """ + SELECT JSON_OBJECT('a', JSON("name")) FROM "player" + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "c": Database.json(infoColumn), + ]) + ), """ + SELECT JSON_OBJECT('c', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonObject_from_Array() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + // Ordered Array + + try assertEqualSQL( + db, + Database.jsonObject([ + (key: "a", value: 2), + (key: "c", value: #"{"e":5}"#), + ] as [(key: String, value: any SQLExpressible)]), """ + JSON_OBJECT('a', 2, 'c', '{"e":5}') + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + (key: "a", value: 2), + (key: "c", value: #"{"e":5}"#.databaseValue.asJSON), + ] as [(key: String, value: any SQLExpressible)]), """ + JSON_OBJECT('a', 2, 'c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + (key: "a", value: 2), + (key: "c", value: Database.jsonObject(["e": 5])), + ] as [(key: String, value: any SQLExpressible)]), """ + JSON_OBJECT('a', 2, 'c', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + (key: "a", value: 2), + (key: "c", value: Database.json(#"{"e":5}"#)), + ] as [(key: String, value: any SQLExpressible)]), """ + JSON_OBJECT('a', 2, 'c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + (key: "a", value: nameColumn), + (key: "c", value: infoColumn), + ] as [(key: String, value: any SQLExpressible)]) + ), """ + SELECT JSON_OBJECT('a', "name", 'c', JSON("info")) FROM "player" + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + (key: "a", value: Database.json(nameColumn)), + (key: "c", value: 
Database.json(infoColumn)), + ] as [(key: String, value: SQLExpression)]) + ), """ + SELECT JSON_OBJECT('a', JSON("name"), 'c', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonObject_from_KeyValuePairs() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + // Ordered Array + + try assertEqualSQL( + db, + Database.jsonObject([ + "a": 2, + "c": #"{"e":5}"#, + ] as KeyValuePairs), """ + JSON_OBJECT('a', 2, 'c', '{"e":5}') + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "a": 2, + "c": #"{"e":5}"#.databaseValue.asJSON, + ] as KeyValuePairs), """ + JSON_OBJECT('a', 2, 'c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "a": 2, + "c": Database.jsonObject(["e": 5]), + ] as KeyValuePairs), """ + JSON_OBJECT('a', 2, 'c', JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL( + db, + Database.jsonObject([ + "a": 2, + "c": Database.json(#"{"e":5}"#), + ] as KeyValuePairs), """ + JSON_OBJECT('a', 2, 'c', JSON('{"e":5}')) + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "a": nameColumn, + "c": infoColumn, + ] as KeyValuePairs) + ), """ + SELECT JSON_OBJECT('a', "name", 'c', JSON("info")) FROM "player" + """) + + try assertEqualSQL( + db, + player.select( + Database.jsonObject([ + "a": Database.json(nameColumn), + "c": Database.json(infoColumn), + ] as KeyValuePairs) + ), """ + SELECT JSON_OBJECT('a', JSON("name"), 'c', JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonPatch() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonPatch(#"{"a":1,"b":2}"#, with: #"{"c":3,"d":4}"#), """ + JSON_PATCH('{"a":1,"b":2}', '{"c":3,"d":4}') + """) + + try assertEqualSQL(db, player.select(Database.jsonPatch(#"{"a":1,"b":2}"#, with: nameColumn)), """ + SELECT JSON_PATCH('{"a":1,"b":2}', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonPatch(#"{"a":1,"b":2}"#, with: infoColumn)), """ + SELECT JSON_PATCH('{"a":1,"b":2}', "info") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonPatch(nameColumn, with: #"{"c":3,"d":4}"#)), """ + SELECT JSON_PATCH("name", '{"c":3,"d":4}') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonPatch(infoColumn, with: #"{"c":3,"d":4}"#)), """ + SELECT JSON_PATCH("info", '{"c":3,"d":4}') FROM "player" + """) + } + } + + func test_Database_jsonRemove_atPath() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + 
guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonRemove("[0,1,2,3,4]", atPath: "$[2]"), """ + JSON_REMOVE('[0,1,2,3,4]', '$[2]') + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove(nameColumn, atPath: "$[2]")), """ + SELECT JSON_REMOVE("name", '$[2]') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove("[0,1,2,3,4]", atPath: nameColumn)), """ + SELECT JSON_REMOVE('[0,1,2,3,4]', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove(infoColumn, atPath: "$[2]")), """ + SELECT JSON_REMOVE("info", '$[2]') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove("[0,1,2,3,4]", atPath: infoColumn)), """ + SELECT JSON_REMOVE('[0,1,2,3,4]', "info") FROM "player" + """) + } + } + + func test_Database_jsonRemove_atPaths() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonRemove("[0,1,2,3,4]", atPaths: ["$[2]", "$[0]"]), """ + JSON_REMOVE('[0,1,2,3,4]', '$[2]', '$[0]') + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove(nameColumn, atPaths: ["$[2]", "$[0]"])), """ + SELECT JSON_REMOVE("name", '$[2]', '$[0]') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonRemove(infoColumn, atPaths: ["$[2]", "$[0]"])), """ + SELECT JSON_REMOVE("info", '$[2]', '$[0]') FROM "player" + """) + } + } + + func test_Database_jsonType() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#), """ + JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}') + """) + + try assertEqualSQL(db, player.select(Database.jsonType(nameColumn)), """ + SELECT JSON_TYPE("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonType(infoColumn)), """ + SELECT JSON_TYPE("info") FROM "player" + """) + } + } + + func test_Database_jsonType_atPath() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 
3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#, atPath: "$.a"), """ + JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}', '$.a') + """) + + try assertEqualSQL(db, player.select(Database.jsonType(nameColumn, atPath: "$.a")), """ + SELECT JSON_TYPE("name", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonType(infoColumn, atPath: "$.a")), """ + SELECT JSON_TYPE("info", '$.a') FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#, atPath: nameColumn)), """ + SELECT JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}', "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonType(#"{"a":[2,3.5,true,false,null,"x"]}"#, atPath: infoColumn)), """ + SELECT JSON_TYPE('{"a":[2,3.5,true,false,null,"x"]}', "info") FROM "player" + """) + } + } + + func test_Database_jsonIsValid() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonIsValid(#"{"x":35""#), """ + JSON_VALID('{"x":35"') + """) + + try assertEqualSQL(db, player.select(Database.jsonIsValid(nameColumn)), """ + SELECT JSON_VALID("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonIsValid(infoColumn)), """ + SELECT JSON_VALID("info") FROM "player" + """) + } + } + + func test_Database_jsonQuote() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, Database.jsonQuote(#"{"e":5}"#), """ + JSON_QUOTE('{"e":5}') + """) + + try assertEqualSQL(db, Database.jsonQuote(#"{"e":5}"#.databaseValue.asJSON), """ + JSON_QUOTE(JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonQuote(Database.json(#"{"e":5}"#)), """ + JSON_QUOTE(JSON('{"e":5}')) + """) + + try assertEqualSQL(db, Database.jsonQuote(Database.jsonObject(["e": 5])), """ + JSON_QUOTE(JSON_OBJECT('e', 5)) + """) + + try assertEqualSQL(db, player.select(Database.jsonQuote(nameColumn)), """ + SELECT JSON_QUOTE("name") FROM "player" + """) + + try assertEqualSQL(db, 
player.select(Database.jsonQuote(infoColumn)), """ + SELECT JSON_QUOTE(JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonGroupArray() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(nameColumn)), """ + SELECT JSON_GROUP_ARRAY("name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(infoColumn)), """ + SELECT JSON_GROUP_ARRAY(JSON("info")) FROM "player" + """) + } + } + + func test_Database_jsonGroupArray_filter() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(nameColumn, filter: length(nameColumn) > 0)), """ + SELECT JSON_GROUP_ARRAY("name") FILTER (WHERE LENGTH("name") > 0) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(infoColumn, filter: length(nameColumn) > 0)), """ + SELECT JSON_GROUP_ARRAY(JSON("info")) FILTER (WHERE LENGTH("name") > 0) FROM "player" + """) + } + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + func test_Database_jsonGroupArray_order() throws { + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3044000 else { + throw XCTSkip("JSON support is not available") + } + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("name", .text) + t.column("info", .jsonText) + } + let player = Table("player") + let nameColumn = Column("name") + let infoColumn = JSONColumn("info") + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(nameColumn, orderBy: nameColumn)), """ + SELECT JSON_GROUP_ARRAY("name" ORDER BY "name") FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(infoColumn, orderBy: nameColumn.desc)), """ + SELECT JSON_GROUP_ARRAY(JSON("info") ORDER BY "name" DESC) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(nameColumn, orderBy: nameColumn, filter: length(nameColumn) > 0)), """ + SELECT JSON_GROUP_ARRAY("name" ORDER BY "name") FILTER (WHERE LENGTH("name") > 0) FROM "player" + """) + + try assertEqualSQL(db, player.select(Database.jsonGroupArray(infoColumn, orderBy: nameColumn.desc, filter: length(nameColumn) > 0)), """ + SELECT JSON_GROUP_ARRAY(JSON("info") ORDER BY "name" DESC) FILTER (WHERE LENGTH("name") > 0) FROM "player" + """) + } + } +#endif + + func test_Database_jsonGroupObject() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard 
sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("key", .text) + t.column("value", .jsonText) + } + let player = Table("player") + let keyColumn = Column("key") + let valueColumn = JSONColumn("value") + + try assertEqualSQL(db, player.select(Database.jsonGroupObject(key: keyColumn, value: valueColumn)), """ + SELECT JSON_GROUP_OBJECT("key", JSON("value")) FROM "player" + """) + } + } + + func test_Database_jsonGroupObject_filter() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support is not available") + } +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.column("key", .text) + t.column("value", .jsonText) + } + let player = Table("player") + let keyColumn = Column("key") + let valueColumn = JSONColumn("value") + + try assertEqualSQL(db, player.select(Database.jsonGroupObject(key: keyColumn, value: valueColumn, filter: length(valueColumn) > 0)), """ + SELECT JSON_GROUP_OBJECT("key", JSON("value")) FILTER (WHERE LENGTH("value") > 0) FROM "player" + """) + } + } + + func test_index_and_generated_columns() throws { +#if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3038000 else { + throw XCTSkip("JSON support is not available") + } +#else + guard #available(iOS 16, macOS 12, tvOS 17, watchOS 9, *) else { + throw XCTSkip("JSON support or generated columns are not available") + } + +#endif + + try makeDatabaseQueue().inDatabase { db in + try db.create(table: "player") { t in + t.primaryKey("id", .integer) + t.column("address", .jsonText) + t.column("country", .text) + .generatedAs(JSONColumn("address").jsonExtract(atPath: "$.country")) + .indexed() + } + + XCTAssertEqual(Array(sqlQueries.suffix(2)), [ + """ + CREATE TABLE "player" (\ + "id" INTEGER PRIMARY KEY, \ + "address" TEXT, \ + "country" TEXT GENERATED ALWAYS AS (JSON_EXTRACT("address", '$.country')) VIRTUAL\ + ) + """, + """ + CREATE INDEX "player_on_country" ON "player"("country") + """, + ]) + + try db.create(index: "player_on_address", on: "player", expressions: [ + JSONColumn("address").jsonExtract(atPath: "$.country"), + JSONColumn("address").jsonExtract(atPath: "$.city"), + JSONColumn("address").jsonExtract(atPath: "$.street"), + ]) + + XCTAssertEqual(lastSQLQuery, """ + CREATE INDEX "player_on_address" ON "player"(\ + JSON_EXTRACT("address", '$.country'), \ + JSON_EXTRACT("address", '$.city'), \ + JSON_EXTRACT("address", '$.street')\ + ) + """) + + try db.execute(literal: """ + INSERT INTO player VALUES ( + NULL, + '{"street": "Rue de Belleville", "city": "Paris", "country": "France"}' + ) + """) + + try XCTAssertEqual(String.fetchOne(db, sql: "SELECT country FROM player"), "France") + } + } + +// TODO: Enable when those apis are ready. 
+// func test_ColumnAssignment() throws { +// #if GRDBCUSTOMSQLITE || GRDBCIPHER +// // Prevent SQLCipher failures +// guard sqlite3_libversion_number() >= 3038000 else { +// throw XCTSkip("JSON support is not available") +// } +// #else +// guard #available(iOS 16, macOS 10.15, tvOS 17, watchOS 9, *) else { +// throw XCTSkip("JSON support is not available") +// } +// #endif +// +// try makeDatabaseQueue().inDatabase { db in +// try db.create(table: "player") { t in +// t.column("name", .text) +// t.column("info", .jsonText) +// } +// +// struct Player: TableRecord { } +// +// try Player.updateAll(db, [ +// JSONColumn("info").jsonPatch(with: Database.jsonObject(["city": "Paris"])) +// ]) +// XCTAssertEqual(lastSQLQuery, """ +// UPDATE "player" SET "info" = JSON_PATCH("info", JSON_OBJECT('city', 'Paris')) +// """) +// +// try Player.updateAll(db, [ +// JSONColumn("info").jsonRemove(atPath: "$.country") +// ]) +// XCTAssertEqual(lastSQLQuery, """ +// UPDATE "player" SET "info" = JSON_REMOVE("info", '$.country') +// """) +// +// try Player.updateAll(db, [ +// JSONColumn("info").jsonRemove(atPaths: ["$.country", "$.city"]) +// ]) +// XCTAssertEqual(lastSQLQuery, """ +// UPDATE "player" SET "info" = JSON_REMOVE("info", '$.country', '$.city') +// """) +// } +// } +} diff --git a/Tests/GRDBTests/JoinSupportTests.swift b/Tests/GRDBTests/JoinSupportTests.swift index 61f94e0270..b2ae7eeeac 100644 --- a/Tests/GRDBTests/JoinSupportTests.swift +++ b/Tests/GRDBTests/JoinSupportTests.swift @@ -50,7 +50,7 @@ private struct T2: Codable, FetchableRecord, TableRecord { private struct T3: Codable, FetchableRecord, TableRecord { static let databaseTableName = "t3" - static let databaseSelection: [SQLSelectable] = [Column("t1id"), Column("name")] + static let databaseSelection: [any SQLSelectable] = [Column("t1id"), Column("name")] var t1id: Int64 var name: String } @@ -92,7 +92,7 @@ private struct FlatModel: FetchableRecord { self.t5count = row.scopes[Scopes.suffix]!["t5count"] } - @available(iOS 13.0, macOS 10.15.0, tvOS 13.0, watchOS 6.0, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) static func modernAll() -> some FetchRequest { all() } @@ -138,7 +138,7 @@ private struct CodableFlatModel: FetchableRecord, Codable { var t3: T3? var t5count: Int - @available(iOS 13.0, macOS 10.15.0, tvOS 13.0, watchOS 6.0, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) static func modernAll() -> some FetchRequest { all() } @@ -186,7 +186,7 @@ private struct CodableNestedModel: FetchableRecord, Codable { var t3: T3? 
var t5count: Int - @available(iOS 13.0, macOS 10.15.0, tvOS 13.0, watchOS 6.0, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) static func modernAll() -> some FetchRequest { all() } diff --git a/Tests/GRDBTests/MutablePersistableRecordChangesTests.swift b/Tests/GRDBTests/MutablePersistableRecordChangesTests.swift index bfa1bf32ac..f530149f8a 100644 --- a/Tests/GRDBTests/MutablePersistableRecordChangesTests.swift +++ b/Tests/GRDBTests/MutablePersistableRecordChangesTests.swift @@ -561,4 +561,64 @@ class MutablePersistableRecordChangesTests: GRDBTestCase { } } } + + func testDatabaseChangesWithClass() throws { + class Player: Encodable, EncodableRecord { + var id: Int64 + var name: String + var score: Int + + init(id: Int64, name: String, score: Int) { + self.id = id + self.name = name + self.score = score + } + } + + var player = Player(id: 1, name: "Arthur", score: 1000) + do { + let changes = try player.databaseChanges { _ in } + XCTAssert(changes.isEmpty) + } + do { + let changes = try player.databaseChanges { + $0.name = "Barbara" + } + XCTAssertEqual(changes, ["name": "Arthur".databaseValue]) + } + do { + let changes = try player.databaseChanges { + $0.name = "Craig" + $0.score = 200 + } + XCTAssertEqual(changes, ["name": "Barbara".databaseValue, "score": 1000.databaseValue]) + } + } + + func testDatabaseChangesWithStruct() throws { + struct Player: Encodable, EncodableRecord { + var id: Int64 + var name: String + var score: Int + } + + var player = Player(id: 1, name: "Arthur", score: 1000) + do { + let changes = try player.databaseChanges { _ in } + XCTAssert(changes.isEmpty) + } + do { + let changes = try player.databaseChanges { + $0.name = "Barbara" + } + XCTAssertEqual(changes, ["name": "Arthur".databaseValue]) + } + do { + let changes = try player.databaseChanges { + $0.name = "Craig" + $0.score = 200 + } + XCTAssertEqual(changes, ["name": "Barbara".databaseValue, "score": 1000.databaseValue]) + } + } } diff --git a/Tests/GRDBTests/MutablePersistableRecordTests.swift b/Tests/GRDBTests/MutablePersistableRecordTests.swift index 0978f74fe6..18a15c1eeb 100644 --- a/Tests/GRDBTests/MutablePersistableRecordTests.swift +++ b/Tests/GRDBTests/MutablePersistableRecordTests.swift @@ -1253,7 +1253,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1274,7 +1274,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1324,7 +1324,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1415,7 +1415,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1436,7 +1436,7 @@ 
extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1562,7 +1562,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1692,7 +1692,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1743,7 +1743,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1793,7 +1793,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1847,7 +1847,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1901,7 +1901,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1962,7 +1962,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -2022,7 +2022,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -2088,7 +2088,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -2237,7 +2237,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -2393,7 +2393,7 @@ extension 
MutablePersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -2496,7 +2496,7 @@ extension MutablePersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif diff --git a/Tests/GRDBTests/OrderedDictionaryTests.swift b/Tests/GRDBTests/OrderedDictionaryTests.swift index cc58230127..3e6b278d12 100644 --- a/Tests/GRDBTests/OrderedDictionaryTests.swift +++ b/Tests/GRDBTests/OrderedDictionaryTests.swift @@ -57,4 +57,33 @@ class OrderedDictionaryTests: GRDBTestCase { } } } + + func testDescription() { + do { + // One key/value pair + let dict: OrderedDictionary = ["toto": "foo"] + XCTAssertEqual(dict.description, #"["toto": "foo"]"#) + } + do { + // Two key/value pairs + let dict: OrderedDictionary = ["toto": "foo", "titi": "bar"] + XCTAssertEqual(dict.description, #"["toto": "foo", "titi": "bar"]"#) + } + } + + func testOrderedDictionaryDescriptionEqualsDictionaryDescription() { + struct Key: Hashable, CustomStringConvertible, CustomDebugStringConvertible { + var description: String { "Key description" } + var debugDescription: String { "Key debugDescription" } + } + struct Value: CustomStringConvertible, CustomDebugStringConvertible { + var description: String { "Value description" } + var debugDescription: String { "Value debugDescription" } + } + do { + let dict = [Key(): Value()] + let orderedDict: OrderedDictionary = [Key(): Value()] + XCTAssertEqual(String(describing: orderedDict), String(describing: dict)) + } + } } diff --git a/Tests/GRDBTests/PersistableRecordTests.swift b/Tests/GRDBTests/PersistableRecordTests.swift index ce4dde8735..d919eeb989 100644 --- a/Tests/GRDBTests/PersistableRecordTests.swift +++ b/Tests/GRDBTests/PersistableRecordTests.swift @@ -1332,7 +1332,7 @@ extension PersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1381,7 +1381,7 @@ extension PersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1434,7 +1434,7 @@ extension PersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1557,7 +1557,7 @@ extension PersistableRecordTests { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -1684,7 +1684,7 @@ extension PersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else 
{ + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -1821,7 +1821,7 @@ extension PersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -1966,7 +1966,7 @@ extension PersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -2069,7 +2069,7 @@ extension PersistableRecordTests { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif diff --git a/Tests/GRDBTests/PrimaryKeyInfoTests.swift b/Tests/GRDBTests/PrimaryKeyInfoTests.swift index e6e8fd271a..efe2ac8dac 100644 --- a/Tests/GRDBTests/PrimaryKeyInfoTests.swift +++ b/Tests/GRDBTests/PrimaryKeyInfoTests.swift @@ -37,6 +37,7 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (name TEXT)") let primaryKey = try db.primaryKey("items") + XCTAssertNil(primaryKey.columnInfos) XCTAssertEqual(primaryKey.columns, [Column.rowID.name]) XCTAssertNil(primaryKey.rowIDColumn) XCTAssertTrue(primaryKey.isRowID) @@ -49,6 +50,22 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY, name TEXT)") let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["id"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["INTEGER"]) + XCTAssertEqual(primaryKey.columns, ["id"]) + XCTAssertEqual(primaryKey.rowIDColumn, "id") + XCTAssertTrue(primaryKey.isRowID) + XCTAssertTrue(primaryKey.tableHasRowID) + } + } + + func testIntegerPrimaryKey2() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id INTEGER, name TEXT, PRIMARY KEY (id))") + let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["id"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["INTEGER"]) XCTAssertEqual(primaryKey.columns, ["id"]) XCTAssertEqual(primaryKey.rowIDColumn, "id") XCTAssertTrue(primaryKey.isRowID) @@ -61,6 +78,8 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (name TEXT PRIMARY KEY)") let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["name"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["TEXT"]) XCTAssertEqual(primaryKey.columns, ["name"]) XCTAssertNil(primaryKey.rowIDColumn) XCTAssertFalse(primaryKey.isRowID) @@ -73,6 +92,8 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (a TEXT, b INTEGER, PRIMARY KEY (a,b))") let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["a", "b"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["TEXT", "INTEGER"]) XCTAssertEqual(primaryKey.columns, ["a", "b"]) XCTAssertNil(primaryKey.rowIDColumn) XCTAssertFalse(primaryKey.isRowID) @@ -85,6 
+106,8 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (name TEXT PRIMARY KEY) WITHOUT ROWID") let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["name"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["TEXT"]) XCTAssertEqual(primaryKey.columns, ["name"]) XCTAssertNil(primaryKey.rowIDColumn) XCTAssertFalse(primaryKey.isRowID) @@ -97,10 +120,126 @@ class PrimaryKeyInfoTests: GRDBTestCase { try dbQueue.inDatabase { db in try db.execute(sql: "CREATE TABLE items (a TEXT, b INTEGER, PRIMARY KEY (a,b)) WITHOUT ROWID") let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["a", "b"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["TEXT", "INTEGER"]) XCTAssertEqual(primaryKey.columns, ["a", "b"]) XCTAssertNil(primaryKey.rowIDColumn) XCTAssertFalse(primaryKey.isRowID) XCTAssertFalse(primaryKey.tableHasRowID) } } + + func testUnknownSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (name TEXT)") + do { + _ = try db.primaryKey("items", in: "invalid") + XCTFail("Expected Error") + } catch let error as DatabaseError { + XCTAssertEqual(error.resultCode, .SQLITE_ERROR) + XCTAssertEqual(error.message, "no such schema: invalid") + XCTAssertEqual(error.description, "SQLite error 1: no such schema: invalid") + } + } + } + + func testSpecifiedMainSchema() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (name TEXT)") + let primaryKey = try db.primaryKey("items", in: "main") + XCTAssertNil(primaryKey.columnInfos) + XCTAssertEqual(primaryKey.columns, [Column.rowID.name]) + XCTAssertNil(primaryKey.rowIDColumn) + XCTAssertTrue(primaryKey.isRowID) + XCTAssertTrue(primaryKey.tableHasRowID) + } + } + + func testSpecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id2 INTEGER PRIMARY KEY)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id1 INTEGER PRIMARY KEY)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + + let primaryKeyMain = try db.primaryKey("items", in: "main") + XCTAssertEqual(primaryKeyMain.columnInfos?.map(\.name), ["id1"]) + XCTAssertEqual(primaryKeyMain.columnInfos?.map(\.type), ["INTEGER"]) + XCTAssertEqual(primaryKeyMain.columns, ["id1"]) + XCTAssertEqual(primaryKeyMain.rowIDColumn, "id1") + XCTAssertTrue(primaryKeyMain.isRowID) + XCTAssertTrue(primaryKeyMain.tableHasRowID) + + let primaryKeyAttached = try db.primaryKey("items", in: "attached") + XCTAssertEqual(primaryKeyAttached.columnInfos?.map(\.name), ["id2"]) + XCTAssertEqual(primaryKeyAttached.columnInfos?.map(\.type), ["INTEGER"]) + XCTAssertEqual(primaryKeyAttached.columns, ["id2"]) + XCTAssertEqual(primaryKeyAttached.rowIDColumn, "id2") + XCTAssertTrue(primaryKeyAttached.isRowID) + XCTAssertTrue(primaryKeyAttached.tableHasRowID) + } + } + + // The `items` table in the attached database should never + // be found unless explicitly specified as it is 
after + // `main.items` in resolution order. + func testUnspecifiedSchemaWithTableNameCollisions() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id2 INTEGER PRIMARY KEY)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id1 INTEGER PRIMARY KEY)") + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["id1"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["INTEGER"]) + XCTAssertEqual(primaryKey.columns, ["id1"]) + XCTAssertEqual(primaryKey.rowIDColumn, "id1") + XCTAssertTrue(primaryKey.isRowID) + XCTAssertTrue(primaryKey.tableHasRowID) + } + } + + func testUnspecifiedSchemaFindsAttachedDatabase() throws { + #if GRDBCIPHER_USE_ENCRYPTION + // Avoid error due to key not being provided: + // file is not a database - while executing `ATTACH DATABASE...` + throw XCTSkip("This test does not support encrypted databases") + #endif + + let attached = try makeDatabaseQueue(filename: "attached1") + try attached.inDatabase { db in + try db.execute(sql: "CREATE TABLE items (id INTEGER PRIMARY KEY)") + } + let main = try makeDatabaseQueue(filename: "main") + try main.inDatabase { db in + try db.execute(literal: "ATTACH DATABASE \(attached.path) AS attached") + let primaryKey = try db.primaryKey("items") + XCTAssertEqual(primaryKey.columnInfos?.map(\.name), ["id"]) + XCTAssertEqual(primaryKey.columnInfos?.map(\.type), ["INTEGER"]) + XCTAssertEqual(primaryKey.columns, ["id"]) + XCTAssertEqual(primaryKey.rowIDColumn, "id") + XCTAssertTrue(primaryKey.isRowID) + XCTAssertTrue(primaryKey.tableHasRowID) + } + } } diff --git a/Tests/GRDBTests/QueryInterfaceExpressionsTests.swift b/Tests/GRDBTests/QueryInterfaceExpressionsTests.swift index 3462bf23cb..e2e50cb240 100644 --- a/Tests/GRDBTests/QueryInterfaceExpressionsTests.swift +++ b/Tests/GRDBTests/QueryInterfaceExpressionsTests.swift @@ -185,7 +185,7 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT * FROM \"readers\" WHERE (\"name\" COLLATE NOCASE) NOT IN ('arthur', 'barbara')") XCTAssertEqual( - sql(dbQueue, tableRequest.filter((["arthur", "barbara"] as [SQLExpressible]).contains(Col.name.collating(.nocase)))), + sql(dbQueue, tableRequest.filter((["arthur", "barbara"] as [any SQLExpressible]).contains(Col.name.collating(.nocase)))), "SELECT * FROM \"readers\" WHERE (\"name\" COLLATE NOCASE) IN ('arthur', 'barbara')") // Sequence.contains(): IN operator @@ -239,7 +239,7 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT * FROM \"readers\" WHERE (\"name\" COLLATE NOCASE) NOT IN ('arthur', 'barbara')") XCTAssertEqual( - sql(dbQueue, tableRequest.filter((["arthur", "barbara"] as [SQLExpressible]).contains(Col.name).collating(.nocase))), + sql(dbQueue, tableRequest.filter((["arthur", "barbara"] as [any SQLExpressible]).contains(Col.name).collating(.nocase))), "SELECT * FROM \"readers\" WHERE (\"name\" COLLATE NOCASE) IN ('arthur', 'barbara')") // Sequence.contains(): IN operator @@ -660,7 +660,7 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { try dbQueue.write { db in try db.create(table: "parent") { t in 
t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent") + t.belongsTo("parent") } try db.create(table: "child") { t in t.column("childParentId", .integer).references("parent") @@ -1311,6 +1311,156 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { } + // MARK: - Bitwise expressions + + func testPrefixBitwiseNotOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(~Col.age)), + "SELECT * FROM \"readers\" WHERE ~\"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(~(Col.age + 1))), + "SELECT * FROM \"readers\" WHERE ~(\"age\" + 1)") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(~(~Col.age + 1))), + "SELECT * FROM \"readers\" WHERE ~((~\"age\") + 1)") + } + + func testInfixLeftShiftOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age << 2)), + "SELECT * FROM \"readers\" WHERE \"age\" << 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 << Col.age)), + "SELECT * FROM \"readers\" WHERE 2 << \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filterWhenConnected { _ in 2 << 2 }), + "SELECT * FROM \"readers\" WHERE 8") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age << Col.age)), + "SELECT * FROM \"readers\" WHERE \"age\" << \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter((Col.age << Col.age) << 1)), + "SELECT * FROM \"readers\" WHERE (\"age\" << \"age\") << 1") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(1 << [Col.age > 1, Col.age == nil].joined(operator: .and))), + "SELECT * FROM \"readers\" WHERE 1 << ((\"age\" > 1) AND (\"age\" IS NULL))") + } + + func testInfixRightShiftOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age >> 2)), + "SELECT * FROM \"readers\" WHERE \"age\" >> 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 >> Col.age)), + "SELECT * FROM \"readers\" WHERE 2 >> \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filterWhenConnected { _ in 8 >> 2 }), + "SELECT * FROM \"readers\" WHERE 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age >> Col.age)), + "SELECT * FROM \"readers\" WHERE \"age\" >> \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter((Col.age >> Col.age) >> 1)), + "SELECT * FROM \"readers\" WHERE (\"age\" >> \"age\") >> 1") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(1 >> [Col.age > 1, Col.age == nil].joined(operator: .and))), + "SELECT * FROM \"readers\" WHERE 1 >> ((\"age\" > 1) AND (\"age\" IS NULL))") + } + + func testInfixBitwiseAndOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age & 2)), + "SELECT * FROM \"readers\" WHERE \"age\" & 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 & Col.age)), + "SELECT * FROM \"readers\" WHERE 2 & \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filterWhenConnected { _ in 2 & 2 }), + "SELECT * FROM \"readers\" WHERE 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age & Col.age)), + "SELECT * FROM \"readers\" WHERE \"age\" & \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 & (Col.age & Col.age))), + "SELECT * FROM \"readers\" WHERE 2 & \"age\" & \"age\"") + } + + func testJoinedBitwiseAndOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select([].joined(operator: .bitwiseAnd))), + "SELECT -1 FROM \"readers\"") 
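// Editor's sketch, not part of the diff: the new bitwise operators at a call
// site. The "device" table and "mask" column are hypothetical; the operators
// (~, <<, >>, &, |) and the empty-join identities asserted above (-1 for
// .bitwiseAnd, 0 for .bitwiseOr) come straight from these tests.
import GRDB

struct Device: Codable, FetchableRecord, TableRecord {
    static let databaseTableName = "device"
}

func deviceCountWithFlag(_ db: Database, flag: Int) throws -> Int {
    // SELECT COUNT(*) FROM "device" WHERE ("mask" & ?) <> 0
    try Device.filter((Column("mask") & flag) != 0).fetchCount(db)
}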
+ + XCTAssertEqual( + sql(dbQueue, tableRequest.select([Col.age, Col.age].joined(operator: .bitwiseAnd))), + "SELECT \"age\" & \"age\" FROM \"readers\"") + + XCTAssertEqual( + sql(dbQueue, tableRequest.select([Col.age, 2.databaseValue, Col.age].joined(operator: .bitwiseAnd))), + "SELECT \"age\" & 2 & \"age\" FROM \"readers\"") + + // Flattened + XCTAssertEqual( + sql(dbQueue, tableRequest.select([ + [Col.age, 1.databaseValue].joined(operator: .bitwiseAnd), + [2.databaseValue, Col.age].joined(operator: .bitwiseAnd), + ].joined(operator: .bitwiseAnd))), + "SELECT \"age\" & 1 & 2 & \"age\" FROM \"readers\"") + } + + func testInfixBitwiseOrOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age | 2)), + "SELECT * FROM \"readers\" WHERE \"age\" | 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 | Col.age)), + "SELECT * FROM \"readers\" WHERE 2 | \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filterWhenConnected { _ in 2 | 2 }), + "SELECT * FROM \"readers\" WHERE 2") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(Col.age | Col.age)), + "SELECT * FROM \"readers\" WHERE \"age\" | \"age\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.filter(2 | (Col.age | Col.age))), + "SELECT * FROM \"readers\" WHERE 2 | \"age\" | \"age\"") + } + + func testJoinedBitwiseOrOperator() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select([].joined(operator: .bitwiseOr))), + "SELECT 0 FROM \"readers\"") + + XCTAssertEqual( + sql(dbQueue, tableRequest.select([Col.age, Col.age].joined(operator: .bitwiseOr))), + "SELECT \"age\" | \"age\" FROM \"readers\"") + + XCTAssertEqual( + sql(dbQueue, tableRequest.select([Col.age, 2.databaseValue, Col.age].joined(operator: .bitwiseOr))), + "SELECT \"age\" | 2 | \"age\" FROM \"readers\"") + + // Flattened + XCTAssertEqual( + sql(dbQueue, tableRequest.select([ + [Col.age, 1.databaseValue].joined(operator: .bitwiseOr), + [2.databaseValue, Col.age].joined(operator: .bitwiseOr), + ].joined(operator: .bitwiseOr))), + "SELECT \"age\" | 1 | 2 | \"age\" FROM \"readers\"") + } + // MARK: - IFNULL expression func testIfNull() throws { @@ -1355,6 +1505,36 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT AVG(\"age\" / 2) FROM \"readers\"") } + func testAvgExpression_filter() throws { + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3030000 else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #else + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #endif + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(average(Col.age, filter: Col.age > 0))), + "SELECT AVG(\"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(average(Col.age / 2, filter: Col.age > 0))), + "SELECT AVG(\"age\" / 2) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } + + func testCastExpression() throws { + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(cast(Col.name, as: .blob))), + "SELECT CAST(\"name\" AS BLOB) FROM \"readers\"") + } + func testLengthExpression() throws { let dbQueue = try makeDatabaseQueue() @@ -1374,6 +1554,28 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT MIN(\"age\" / 2) FROM \"readers\"") } + func 
testMinExpression_filter() throws { + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3030000 else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #else + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #endif + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(min(Col.age, filter: Col.age > 0))), + "SELECT MIN(\"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(min(Col.age / 2, filter: Col.age > 0))), + "SELECT MIN(\"age\" / 2) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } + func testMaxExpression() throws { let dbQueue = try makeDatabaseQueue() @@ -1385,6 +1587,28 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT MAX(\"age\" / 2) FROM \"readers\"") } + func testMaxExpression_filter() throws { + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3030000 else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #else + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #endif + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(max(Col.age, filter: Col.age < 0))), + "SELECT MAX(\"age\") FILTER (WHERE \"age\" < 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(max(Col.age / 2, filter: Col.age < 0))), + "SELECT MAX(\"age\" / 2) FILTER (WHERE \"age\" < 0) FROM \"readers\"") + } + func testSumExpression() throws { let dbQueue = try makeDatabaseQueue() @@ -1396,6 +1620,52 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT SUM(\"age\" / 2) FROM \"readers\"") } + func testSumExpression_filter() throws { + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3030000 else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #else + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #endif + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(sum(Col.age, filter: Col.age > 0))), + "SELECT SUM(\"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(sum(Col.age / 2, filter: Col.age > 0))), + "SELECT SUM(\"age\" / 2) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + func testSumExpression_order() throws { + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3044000 else { + throw XCTSkip("ORDER BY clause on aggregate functions is not available") + } + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(sum(Col.age, orderBy: Col.age))), + "SELECT SUM(\"age\" ORDER BY \"age\") FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(sum(Col.age / 2, orderBy: Col.age.desc))), + "SELECT SUM(\"age\" / 2 ORDER BY \"age\" DESC) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(sum(Col.age, orderBy: Col.age, filter: Col.age > 0))), + "SELECT SUM(\"age\" ORDER BY \"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, 
tableRequest.select(sum(Col.age / 2, orderBy: Col.age.desc, filter: Col.age > 0))), + "SELECT SUM(\"age\" / 2 ORDER BY \"age\" DESC) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } +#endif + func testTotalExpression() throws { let dbQueue = try makeDatabaseQueue() @@ -1407,6 +1677,51 @@ class QueryInterfaceExpressionsTests: GRDBTestCase { "SELECT TOTAL(\"age\" / 2) FROM \"readers\"") } + func testTotalExpression_filter() throws { + #if GRDBCUSTOMSQLITE || GRDBCIPHER + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3030000 else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #else + guard #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) else { + throw XCTSkip("FILTER clause on aggregate functions is not available") + } + #endif + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age, filter: Col.age > 0))), + "SELECT TOTAL(\"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age / 2, filter: Col.age > 0))), + "SELECT TOTAL(\"age\" / 2) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + func testTotalExpression_order() throws { + // Prevent SQLCipher failures + guard sqlite3_libversion_number() >= 3044000 else { + throw XCTSkip("ORDER BY clause on aggregate functions is not available") + } + + let dbQueue = try makeDatabaseQueue() + + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age, orderBy: Col.age))), + "SELECT TOTAL(\"age\" ORDER BY \"age\") FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age / 2, orderBy: Col.age.desc))), + "SELECT TOTAL(\"age\" / 2 ORDER BY \"age\" DESC) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age, orderBy: Col.age, filter: Col.age > 0))), + "SELECT TOTAL(\"age\" ORDER BY \"age\") FILTER (WHERE \"age\" > 0) FROM \"readers\"") + XCTAssertEqual( + sql(dbQueue, tableRequest.select(total(Col.age / 2, orderBy: Col.age.desc, filter: Col.age > 0))), + "SELECT TOTAL(\"age\" / 2 ORDER BY \"age\" DESC) FILTER (WHERE \"age\" > 0) FROM \"readers\"") + } +#endif // MARK: - LIKE operator diff --git a/Tests/GRDBTests/QueryInterfaceExtensibilityTests.swift b/Tests/GRDBTests/QueryInterfaceExtensibilityTests.swift index 0c54705fba..ac708cf355 100644 --- a/Tests/GRDBTests/QueryInterfaceExtensibilityTests.swift +++ b/Tests/GRDBTests/QueryInterfaceExtensibilityTests.swift @@ -1,7 +1,7 @@ import XCTest import GRDB -private func cast<T: SQLExpressible>(_ value: T, as type: Database.ColumnType) -> SQLExpression { +private func myCast<T: SQLExpressible>(_ value: T, as type: Database.ColumnType) -> SQLExpression { SQL("CAST(\(value) AS \(sql: type.rawValue))").sqlExpression } @@ -19,7 +19,7 @@ class QueryInterfaceExtensibilityTests: GRDBTestCase { try db.execute(sql: "INSERT INTO records (text) VALUES (?)", arguments: ["foo"]) do { - let request = Record.select(cast(Column("text"), as: .blob)) + let request = Record.select(myCast(Column("text"), as: .blob)) let dbValue = try DatabaseValue.fetchOne(db, request)! switch dbValue.storage { case .blob: @@ -30,7 +30,7 @@ class QueryInterfaceExtensibilityTests: GRDBTestCase { XCTAssertEqual(self.lastSQLQuery, "SELECT CAST(\"text\" AS BLOB) FROM \"records\" LIMIT 1") } do { - let request = Record.select(cast(Column("text"), as: .blob) && true) + let request = Record.select(myCast(Column("text"), as: .blob) && true) _ = try DatabaseValue.fetchOne(db, request)!
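// Editor's sketch, not part of the diff: the rename to myCast above avoids
// shadowing the cast(_:as:) expression that GRDB itself now provides (see
// testCastExpression earlier in this diff). The request mirrors this test
// file's Record/"records" fixture and the SQL the tests assert.
func castedText(_ db: Database) throws -> DatabaseValue? {
    struct Record: TableRecord { static let databaseTableName = "records" }
    let request = Record.select(cast(Column("text"), as: .blob))
    // SELECT CAST("text" AS BLOB) FROM "records" LIMIT 1
    return try DatabaseValue.fetchOne(db, request)
}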
XCTAssertEqual(self.lastSQLQuery, "SELECT (CAST(\"text\" AS BLOB)) AND 1 FROM \"records\" LIMIT 1") } diff --git a/Tests/GRDBTests/QueryInterfacePromiseTests.swift b/Tests/GRDBTests/QueryInterfacePromiseTests.swift index 1cd1c14dec..fc4fb50cfd 100644 --- a/Tests/GRDBTests/QueryInterfacePromiseTests.swift +++ b/Tests/GRDBTests/QueryInterfacePromiseTests.swift @@ -14,7 +14,7 @@ class QueryInterfacePromiseTests: GRDBTestCase { try dbWriter.write { db in try db.create(table: "node") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("node") + t.belongsTo("parent", inTable: "node") } } } diff --git a/Tests/GRDBTests/QueryInterfaceRequestTests.swift b/Tests/GRDBTests/QueryInterfaceRequestTests.swift index fa3c35f1ce..91f0323549 100644 --- a/Tests/GRDBTests/QueryInterfaceRequestTests.swift +++ b/Tests/GRDBTests/QueryInterfaceRequestTests.swift @@ -136,6 +136,21 @@ class QueryInterfaceRequestTests: GRDBTestCase { } } + + // Regression test for <https://github.com/groue/GRDB.swift/issues/1357> + func testIssue1357() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = tableRequest + .annotated(with: Column("name").forKey("alt")) + .filter(Column("alt").detached) + + XCTAssertEqual(try request.fetchCount(db), 0) + XCTAssertEqual(lastSQLQuery, """ + SELECT COUNT(*) FROM (SELECT *, "name" AS "alt" FROM "readers" WHERE "alt") + """) + } + } + + // MARK: - Select @@ -285,7 +300,7 @@ class QueryInterfaceRequestTests: GRDBTestCase { } try db.create(table: "book") { t in t.autoIncrementedPrimaryKey("id") - t.column("authorId", .integer).references("author") + t.belongsTo("author") } try db.execute(sql: """ INSERT INTO author(id, name) VALUES (1, 'Arthur'); @@ -764,7 +779,7 @@ class QueryInterfaceRequestTests: GRDBTestCase { sql(dbQueue, tableRequest.order(Col.age.descNullsFirst)), "SELECT * FROM \"readers\" ORDER BY \"age\" DESC NULLS FIRST") #elseif !GRDBCIPHER - if #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) { + if #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) { XCTAssertEqual( sql(dbQueue, tableRequest.order(Col.age.ascNullsLast)), "SELECT * FROM \"readers\" ORDER BY \"age\" ASC NULLS LAST") @@ -794,7 +809,7 @@ class QueryInterfaceRequestTests: GRDBTestCase { sql(dbQueue, tableRequest.order(Col.name.collating(.nocase).descNullsFirst)), "SELECT * FROM \"readers\" ORDER BY \"name\" COLLATE NOCASE DESC NULLS FIRST") #elseif !GRDBCIPHER - if #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) { + if #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) { XCTAssertEqual( sql(dbQueue, tableRequest.order(Col.name.collating(.nocase).ascNullsLast)), "SELECT * FROM \"readers\" ORDER BY \"name\" COLLATE NOCASE ASC NULLS LAST") @@ -843,7 +858,7 @@ class QueryInterfaceRequestTests: GRDBTestCase { sql(dbQueue, tableRequest.order(Col.age.ascNullsLast).reversed()), "SELECT * FROM \"readers\" ORDER BY \"age\" DESC NULLS FIRST") #elseif !GRDBCIPHER - if #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) { + if #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) { XCTAssertEqual( sql(dbQueue, tableRequest.order(Col.age.descNullsFirst).reversed()), "SELECT * FROM \"readers\" ORDER BY \"age\" ASC NULLS LAST") @@ -873,7 +888,7 @@ class QueryInterfaceRequestTests: GRDBTestCase { sql(dbQueue, tableRequest.order(Col.name.collating(.nocase).descNullsFirst).reversed()), "SELECT * FROM \"readers\" ORDER BY \"name\" COLLATE NOCASE ASC NULLS LAST") #elseif !GRDBCIPHER - if #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) { + if #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7,
*) { XCTAssertEqual( sql(dbQueue, tableRequest.order(Col.name.collating(.nocase).ascNullsLast).reversed()), "SELECT * FROM \"readers\" ORDER BY \"name\" COLLATE NOCASE DESC NULLS FIRST") diff --git a/Tests/GRDBTests/RecordMinimalNonOptionalPrimaryKeySingleTests.swift b/Tests/GRDBTests/RecordMinimalNonOptionalPrimaryKeySingleTests.swift index 81240e7c2c..1ce98c1829 100644 --- a/Tests/GRDBTests/RecordMinimalNonOptionalPrimaryKeySingleTests.swift +++ b/Tests/GRDBTests/RecordMinimalNonOptionalPrimaryKeySingleTests.swift @@ -41,7 +41,7 @@ private class MinimalNonOptionalPrimaryKeySingle: Record, Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension MinimalNonOptionalPrimaryKeySingle: Identifiable { } class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { @@ -436,6 +436,17 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = MinimalNonOptionalPrimaryKeySingle.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"minimalSingles\" ORDER BY \"id\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { @@ -460,7 +471,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let cursor = try MinimalNonOptionalPrimaryKeySingle.fetchCursor(db, ids: ids) @@ -499,7 +510,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.id }), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let fetchedRecords = try MinimalNonOptionalPrimaryKeySingle.fetchAll(db, ids: ids) @@ -537,7 +548,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.id }), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let fetchedRecords = try MinimalNonOptionalPrimaryKeySingle.fetchSet(db, ids: ids) @@ -572,7 +583,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"id\" = '\(record.id)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalNonOptionalPrimaryKeySingle.fetchOne(db, id: record.id)! 
XCTAssertTrue(fetchedRecord.id == record.id) @@ -600,7 +611,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"id\" = '\(record.id)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { _ = try MinimalNonOptionalPrimaryKeySingle.find(db, key: "missing") XCTFail("Expected RecordError") @@ -640,7 +651,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let cursor = try MinimalNonOptionalPrimaryKeySingle.filter(ids: ids).fetchCursor(db) @@ -679,7 +690,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.id }), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let fetchedRecords = try MinimalNonOptionalPrimaryKeySingle.filter(ids: ids).fetchAll(db) @@ -717,7 +728,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.id }), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [String] = [] let fetchedRecords = try MinimalNonOptionalPrimaryKeySingle.filter(ids: ids).fetchSet(db) @@ -752,7 +763,7 @@ class RecordMinimalNonOptionalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"id\" = '\(record.id)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalNonOptionalPrimaryKeySingle.filter(id: record.id).fetchOne(db)! 
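// Editor's sketch, not part of the diff: the Identifiable-keyed fetching that
// these availability-gated branches exercise. Player is a hypothetical record;
// the methods (fetchOne(_:id:), filter(ids:), and find(_:id:), which throws
// RecordError when no row matches) are the ones called in the tests.
import GRDB

struct Player: Codable, FetchableRecord, PersistableRecord, Identifiable {
    var id: Int64
    var name: String
}

@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *)
func loadPlayers(_ db: Database) throws {
    let one = try Player.fetchOne(db, id: 1)                  // ... WHERE "id" = 1
    let some = try Player.filter(ids: [1, 2, 3]).fetchAll(db) // ... WHERE "id" IN (1, 2, 3)
    let required = try Player.find(db, id: 1)                 // throws RecordError if missing
    _ = (one, some, required)
}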
XCTAssertTrue(fetchedRecord.id == record.id) diff --git a/Tests/GRDBTests/RecordMinimalPrimaryKeyRowIDTests.swift b/Tests/GRDBTests/RecordMinimalPrimaryKeyRowIDTests.swift index 4bdc85eca7..43e649f467 100644 --- a/Tests/GRDBTests/RecordMinimalPrimaryKeyRowIDTests.swift +++ b/Tests/GRDBTests/RecordMinimalPrimaryKeyRowIDTests.swift @@ -45,7 +45,7 @@ class MinimalRowID : Record, Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension MinimalRowID: Identifiable { } class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { @@ -470,6 +470,17 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = MinimalRowID.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"minimalRowIDs\" ORDER BY \"id\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { @@ -494,7 +505,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let cursor = try MinimalRowID.fetchCursor(db, ids: ids) @@ -533,7 +544,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try MinimalRowID.fetchAll(db, ids: ids) @@ -571,7 +582,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try MinimalRowID.fetchSet(db, ids: ids) @@ -606,7 +617,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalRowIDs\" WHERE \"id\" = \(record.id!)") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalRowID.fetchOne(db, id: record.id!)! 
XCTAssertTrue(fetchedRecord.id == record.id) @@ -637,7 +648,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalRowIDs\" WHERE \"id\" = \(record.id!)") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { _ = try MinimalRowID.find(db, id: -1) XCTFail("Expected RecordError") @@ -680,7 +691,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let cursor = try MinimalRowID.filter(ids: ids).fetchCursor(db) @@ -719,7 +730,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try MinimalRowID.filter(ids: ids).fetchAll(db) @@ -757,7 +768,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try MinimalRowID.filter(ids: ids).fetchSet(db) @@ -792,7 +803,7 @@ class RecordMinimalPrimaryKeyRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalRowIDs\" WHERE \"id\" = \(record.id!)") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalRowID.filter(id: record.id!).fetchOne(db)! XCTAssertTrue(fetchedRecord.id == record.id) diff --git a/Tests/GRDBTests/RecordMinimalPrimaryKeySingleTests.swift b/Tests/GRDBTests/RecordMinimalPrimaryKeySingleTests.swift index 86bf02fcb4..8c5c1f0369 100644 --- a/Tests/GRDBTests/RecordMinimalPrimaryKeySingleTests.swift +++ b/Tests/GRDBTests/RecordMinimalPrimaryKeySingleTests.swift @@ -39,7 +39,7 @@ class MinimalSingle: Record, Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension MinimalSingle: Identifiable { /// Test non-optional ID type var id: String { UUID! } @@ -492,6 +492,17 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = MinimalSingle.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"minimalSingles\" ORDER BY \"UUID\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { @@ -518,7 +529,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let cursor = try MinimalSingle.fetchCursor(db, ids: UUIDs) @@ -559,7 +570,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.UUID! 
}), Set(UUIDs)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let fetchedRecords = try MinimalSingle.fetchAll(db, ids: UUIDs) @@ -599,7 +610,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.UUID! }), Set(UUIDs)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let fetchedRecords = try MinimalSingle.fetchSet(db, ids: UUIDs) @@ -635,7 +646,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"UUID\" = '\(record.UUID!)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalSingle.fetchOne(db, id: record.UUID!)! XCTAssertTrue(fetchedRecord.UUID == record.UUID) @@ -664,7 +675,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"UUID\" = '\(record.UUID!)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { _ = try MinimalSingle.find(db, id: "missing") XCTFail("Expected RecordError") @@ -706,7 +717,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let cursor = try MinimalSingle.filter(ids: UUIDs).fetchCursor(db) @@ -747,7 +758,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.UUID! }), Set(UUIDs)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let fetchedRecords = try MinimalSingle.filter(ids: UUIDs).fetchAll(db) @@ -787,7 +798,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map { $0.UUID! }), Set(UUIDs)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let UUIDs: [String] = [] let fetchedRecords = try MinimalSingle.filter(ids: UUIDs).fetchSet(db) @@ -823,7 +834,7 @@ class RecordMinimalPrimaryKeySingleTests: GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT * FROM \"minimalSingles\" WHERE \"UUID\" = '\(record.UUID!)'") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try MinimalSingle.filter(id: record.UUID!).fetchOne(db)! 
XCTAssertTrue(fetchedRecord.UUID == record.UUID) diff --git a/Tests/GRDBTests/RecordPersistenceConflictPolicy.swift b/Tests/GRDBTests/RecordPersistenceConflictPolicy.swift index 38f4e1ec54..0ed1a00e9f 100644 --- a/Tests/GRDBTests/RecordPersistenceConflictPolicy.swift +++ b/Tests/GRDBTests/RecordPersistenceConflictPolicy.swift @@ -14,14 +14,14 @@ class RecordPersistenceConflictPolicyTests: GRDBTestCase { func testDefaultPersistenceConflictPolicy() { let record = RecordWithoutPersistenceConflictPolicy() - let policy = type(of: record as MutablePersistableRecord).persistenceConflictPolicy + let policy = type(of: record).persistenceConflictPolicy XCTAssertEqual(policy.conflictResolutionForInsert, .abort) XCTAssertEqual(policy.conflictResolutionForUpdate, .abort) } func testConfigurablePersistenceConflictPolicy() { let record = RecordWithPersistenceConflictPolicy() - let policy = type(of: record as MutablePersistableRecord).persistenceConflictPolicy + let policy = type(of: record).persistenceConflictPolicy XCTAssertEqual(policy.conflictResolutionForInsert, .fail) XCTAssertEqual(policy.conflictResolutionForUpdate, .ignore) } diff --git a/Tests/GRDBTests/RecordPrimaryKeyHiddenRowIDTests.swift b/Tests/GRDBTests/RecordPrimaryKeyHiddenRowIDTests.swift index bd9e4d0eb1..6777722816 100644 --- a/Tests/GRDBTests/RecordPrimaryKeyHiddenRowIDTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeyHiddenRowIDTests.swift @@ -27,7 +27,7 @@ private class Person : Record, Hashable { // Record - override static var databaseSelection: [SQLSelectable] { + override static var databaseSelection: [any SQLSelectable] { [AllColumns(), Column.rowID] } @@ -77,7 +77,7 @@ private class Person : Record, Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Person: Identifiable { } class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { @@ -562,6 +562,17 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Person.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT *, \"rowid\" FROM \"persons\" ORDER BY \"rowid\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { @@ -586,7 +597,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let cursor = try Person.fetchCursor(db, ids: ids) @@ -625,7 +636,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try Person.fetchAll(db, ids: ids) @@ -663,7 +674,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try Person.fetchSet(db, ids: ids) @@ -701,7 +712,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT *, \"rowid\" FROM \"persons\" WHERE \"rowid\" = \(record.id!)") } - if 
#available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try Person.fetchOne(db, id: record.id!)! XCTAssertTrue(fetchedRecord.id == record.id) @@ -738,7 +749,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT *, \"rowid\" FROM \"persons\" WHERE \"rowid\" = \(record.id!)") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { _ = try Person.find(db, id: -1) XCTFail("Expected RecordError") @@ -784,7 +795,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertTrue(try cursor.next() == nil) // end } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let cursor = try Person.filter(ids: ids).fetchCursor(db) @@ -823,7 +834,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try Person.filter(ids: ids).fetchAll(db) @@ -861,7 +872,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(Set(fetchedRecords.map(\.id)), Set(ids)) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let ids: [Int64] = [] let fetchedRecords = try Person.filter(ids: ids).fetchSet(db) @@ -899,7 +910,7 @@ class RecordPrimaryKeyHiddenRowIDTests : GRDBTestCase { XCTAssertEqual(lastSQLQuery, "SELECT *, \"rowid\" FROM \"persons\" WHERE \"rowid\" = \(record.id!)") } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { do { let fetchedRecord = try Person.filter(id: record.id!).fetchOne(db)! 
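// Editor's note, not part of the diff: each record suite in these hunks gains
// a testStableOrder case. withStableOrder() is defined outside this patch (it
// reads as a test-support request helper); as the surrounding assertions show,
// it appends a deterministic ORDER BY over the primary key, falling back to
// the hidden rowid, so fetched row order no longer depends on SQLite's
// unspecified ordering, e.g.
//   Person.all().withStableOrder() → SELECT *, "rowid" FROM "persons" ORDER BY "rowid"
func stablePersons(_ db: Database) throws -> [Person] {
    // Person is the private test record defined earlier in this file's diff.
    try Person.all().withStableOrder().fetchAll(db)
}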
XCTAssertTrue(fetchedRecord.id == record.id) diff --git a/Tests/GRDBTests/RecordPrimaryKeyMultipleTests.swift b/Tests/GRDBTests/RecordPrimaryKeyMultipleTests.swift index cf1cbe4ef3..f8b8c8e109 100644 --- a/Tests/GRDBTests/RecordPrimaryKeyMultipleTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeyMultipleTests.swift @@ -510,6 +510,17 @@ class RecordPrimaryKeyMultipleTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Citizenship.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"citizenships\" ORDER BY \"personName\", \"countryName\"") + } + } + + // MARK: - Exists func testExistsWithNilPrimaryKeyReturnsFalse() throws { diff --git a/Tests/GRDBTests/RecordPrimaryKeyNoneTests.swift b/Tests/GRDBTests/RecordPrimaryKeyNoneTests.swift index d07baeaa13..8582a5e488 100644 --- a/Tests/GRDBTests/RecordPrimaryKeyNoneTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeyNoneTests.swift @@ -147,6 +147,17 @@ class RecordPrimaryKeyNoneTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Item.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"items\" ORDER BY \"rowid\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { diff --git a/Tests/GRDBTests/RecordPrimaryKeyRowIDTests.swift b/Tests/GRDBTests/RecordPrimaryKeyRowIDTests.swift index ab9abe37cc..eb9023cf24 100644 --- a/Tests/GRDBTests/RecordPrimaryKeyRowIDTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeyRowIDTests.swift @@ -555,6 +555,17 @@ class RecordPrimaryKeyRowIDTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Person.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"persons\" ORDER BY \"id\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { diff --git a/Tests/GRDBTests/RecordPrimaryKeySingleTests.swift b/Tests/GRDBTests/RecordPrimaryKeySingleTests.swift index 8342ebab8a..69664f7295 100644 --- a/Tests/GRDBTests/RecordPrimaryKeySingleTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeySingleTests.swift @@ -497,6 +497,17 @@ class RecordPrimaryKeySingleTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Pet.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"pets\" ORDER BY \"UUID\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { diff --git a/Tests/GRDBTests/RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift b/Tests/GRDBTests/RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift index 05faac4575..e1444cdf31 100644 --- a/Tests/GRDBTests/RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift +++ b/Tests/GRDBTests/RecordPrimaryKeySingleWithReplaceConflictResolutionTests.swift @@ -494,6 +494,17 @@ class RecordPrimaryKeySingleWithReplaceConflictResolutionTests: GRDBTestCase { } + // MARK: - Stable order + + func testStableOrder() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + let request = Email.all().withStableOrder() + try assertEqualSQL(db, request, "SELECT * FROM \"emails\" ORDER BY 
\"email\"") + } + } + + // MARK: - Fetch With Primary Key func testFetchCursorWithPrimaryKeys() throws { diff --git a/Tests/GRDBTests/RowAdapterTests.swift b/Tests/GRDBTests/RowAdapterTests.swift index bcf7a3f684..7fa4f3c363 100644 --- a/Tests/GRDBTests/RowAdapterTests.swift +++ b/Tests/GRDBTests/RowAdapterTests.swift @@ -631,7 +631,7 @@ class AdapterRowTests : RowTestCase { copiedRow = baseRow.copy() } - if let copiedRow = copiedRow { + if let copiedRow { XCTAssertEqual(copiedRow.count, 3) XCTAssertEqual(copiedRow["a"] as Int, 0) XCTAssertEqual(copiedRow["b"] as Int, 1) @@ -668,7 +668,7 @@ class AdapterRowTests : RowTestCase { row = baseRow.copy() XCTAssertEqual(row, baseRow) } - if let row = row { + if let row { let copiedRow = row.copy() XCTAssertEqual(row, copiedRow) } else { diff --git a/Tests/GRDBTests/RowFromDictionaryTests.swift b/Tests/GRDBTests/RowFromDictionaryTests.swift index c4a8362c6c..08ddca00fd 100644 --- a/Tests/GRDBTests/RowFromDictionaryTests.swift +++ b/Tests/GRDBTests/RowFromDictionaryTests.swift @@ -27,7 +27,7 @@ class RowFromDictionaryTests : RowTestCase { } func testRowValueAtIndex() { - let dictionary: [String: DatabaseValueConvertible?] = ["a": 0, "b": 1, "c": 2] + let dictionary: [String: (any DatabaseValueConvertible)?] = ["a": 0, "b": 1, "c": 2] let row = Row(dictionary) let aIndex = dictionary.distance(from: dictionary.startIndex, to: dictionary.index(forKey: "a")!) @@ -123,7 +123,7 @@ class RowFromDictionaryTests : RowTestCase { } func testRowDatabaseValueAtIndex() throws { - let dictionary: [String: DatabaseValueConvertible?] = ["null": nil, "int64": 1, "double": 1.1, "string": "foo", "blob": "SQLite".data(using: .utf8)] + let dictionary: [String: (any DatabaseValueConvertible)?] = ["null": nil, "int64": 1, "double": 1.1, "string": "foo", "blob": "SQLite".data(using: .utf8)] let row = Row(dictionary) let nullIndex = dictionary.distance(from: dictionary.startIndex, to: dictionary.index(forKey: "null")!) @@ -140,7 +140,7 @@ class RowFromDictionaryTests : RowTestCase { } func testRowDatabaseValueNamed() throws { - let dictionary: [String: DatabaseValueConvertible?] = ["null": nil, "int64": 1, "double": 1.1, "string": "foo", "blob": "SQLite".data(using: .utf8)] + let dictionary: [String: (any DatabaseValueConvertible)?] 
= ["null": nil, "int64": 1, "double": 1.1, "string": "foo", "blob": "SQLite".data(using: .utf8)] let row = Row(dictionary) guard case .null = (row["null"] as DatabaseValue).storage else { XCTFail(); return } diff --git a/Tests/GRDBTests/SQLExpressionIsConstantTests.swift b/Tests/GRDBTests/SQLExpressionIsConstantTests.swift index 9c2f87f955..c4ed35c0b4 100644 --- a/Tests/GRDBTests/SQLExpressionIsConstantTests.swift +++ b/Tests/GRDBTests/SQLExpressionIsConstantTests.swift @@ -274,6 +274,10 @@ class SQLExpressionIsConstantTests: GRDBTestCase { XCTAssertFalse((Column("a") - 2.databaseValue).isConstantInRequest) XCTAssertFalse((1.databaseValue - Column("a")).isConstantInRequest) + // CAST + XCTAssertTrue(cast(1.databaseValue, as: .real).isConstantInRequest) + XCTAssertFalse(cast(Column("a"), as: .real).isConstantInRequest) + // SQLExpressionCollate XCTAssertTrue("foo".databaseValue.collating(.binary).isConstantInRequest) XCTAssertFalse(Column("a").collating(.binary).isConstantInRequest) diff --git a/Tests/GRDBTests/SQLExpressionLiteralTests.swift b/Tests/GRDBTests/SQLExpressionLiteralTests.swift index 5d80056b0a..510b6196dd 100644 --- a/Tests/GRDBTests/SQLExpressionLiteralTests.swift +++ b/Tests/GRDBTests/SQLExpressionLiteralTests.swift @@ -20,7 +20,7 @@ class SQLExpressionLiteralTests: GRDBTestCase { func testWithoutArguments() throws { try DatabaseQueue().inDatabase { db in let expression = Column("foo").collating(.nocase) == "'fooéı👨👨🏿🇫🇷🇨🇮'" && Column("baz") >= 1 - let context = SQLGenerationContext(db, argumentsSink: .forRawSQL) + let context = SQLGenerationContext(db, argumentsSink: .literalValues) let sql = try expression.sql(context, wrappedInParenthesis: true) XCTAssert(context.arguments.isEmpty) XCTAssertEqual(sql, "((\"foo\" = '''fooéı👨👨🏿🇫🇷🇨🇮''' COLLATE NOCASE) AND (\"baz\" >= 1))") diff --git a/Tests/GRDBTests/SQLLiteralTests.swift b/Tests/GRDBTests/SQLLiteralTests.swift index 8e50c6bf6c..e5ef6272b1 100644 --- a/Tests/GRDBTests/SQLLiteralTests.swift +++ b/Tests/GRDBTests/SQLLiteralTests.swift @@ -220,7 +220,7 @@ extension SQLLiteralTests { do { // Existential let query: SQL = """ - SELECT \(AllColumns() as SQLSelectable) + SELECT \(AllColumns() as any SQLSelectable) FROM player """ @@ -234,7 +234,7 @@ extension SQLLiteralTests { do { // Existential let query: SQL = """ - SELECT \(nil as SQLSelectable?) + SELECT \(nil as (any SQLSelectable)?) 
""" let (sql, arguments) = try query.build(db) @@ -249,6 +249,21 @@ extension SQLLiteralTests { func testTableInterpolation() throws { try makeDatabaseQueue().inDatabase { db in struct Player: TableRecord { } + do { + // Table + let table = Table("player") + let query: SQL = """ + SELECT * + FROM \(table) + """ + + let (sql, arguments) = try query.build(db) + XCTAssertEqual(sql, """ + SELECT * + FROM "player" + """) + XCTAssert(arguments.isEmpty) + } do { // Non-existential let query: SQL = """ @@ -278,7 +293,7 @@ extension SQLLiteralTests { do { // Existential let query: SQL = """ - INSERT INTO \(tableOf: Player() as TableRecord) DEFAULT VALUES + INSERT INTO \(tableOf: Player() as any TableRecord) DEFAULT VALUES """ let (sql, arguments) = try query.build(db) @@ -294,7 +309,7 @@ extension SQLLiteralTests { try makeDatabaseQueue().inDatabase { db in struct Player: TableRecord { } struct AltPlayer: TableRecord { - static let databaseSelection: [SQLSelectable] = [Column("id"), Column("name")] + static let databaseSelection: [any SQLSelectable] = [Column("id"), Column("name")] } do { let query: SQL = """ @@ -484,7 +499,7 @@ extension SQLLiteralTests { try makeDatabaseQueue().inDatabase { db in let set: Set = [1] let array = ["foo", "bar", "baz"] - let expressions: [SQLExpressible] = [Column("a"), Column("b") + 2] + let expressions: [any SQLExpressible] = [Column("a"), Column("b") + 2] let query: SQL = """ SELECT * FROM player WHERE teamId IN \(set) @@ -599,7 +614,7 @@ extension SQLLiteralTests { } try db.create(table: "player") { t in t.autoIncrementedPrimaryKey("id") - t.column("teamId", .integer).references("team") + t.belongsTo("team") t.column("score", .integer) } struct Player: TableRecord { } @@ -727,8 +742,21 @@ extension SQLLiteralTests { do { // Here we test that users can define functions that return - // literal expressions. - func date(_ value: SQLExpressible) -> SQLExpression { + // literal expressions (existential variant). + func date(_ value: any SQLExpressible) -> SQLExpression { + SQL("DATE(\(value))").sqlExpression + } + let createdAt = Column("createdAt") + let request = Player.filter(date(createdAt) == "2020-01-23") + try assertEqualSQL(db, request, """ + SELECT * FROM "player" WHERE (DATE("createdAt")) = '2020-01-23' + """) + } + + do { + // Here we test that users can define functions that return + // literal expressions (generic variant). + func date(_ value: some SQLExpressible) -> SQLExpression { SQL("DATE(\(value))").sqlExpression } let createdAt = Column("createdAt") @@ -741,8 +769,22 @@ extension SQLLiteralTests { do { // Here we test that users can still define functions that // return literal expressions with the previously - // supported technique. - func date(_ value: SQLExpressible) -> SQLExpression { + // supported technique (existential variant). + func date(_ value: any SQLExpressible) -> SQLExpression { + SQL("DATE(\(value.sqlExpression))").sqlExpression + } + let createdAt = Column("createdAt") + let request = Player.filter(date(createdAt) == "2020-01-23") + try assertEqualSQL(db, request, """ + SELECT * FROM "player" WHERE (DATE("createdAt")) = '2020-01-23' + """) + } + + do { + // Here we test that users can still define functions that + // return literal expressions with the previously + // supported technique (generic variant). 
+ func date(_ value: some SQLExpressible) -> SQLExpression { SQL("DATE(\(value.sqlExpression))").sqlExpression } let createdAt = Column("createdAt") @@ -785,10 +827,15 @@ extension SQLLiteralTests { func testProtocolResolution() throws { // SQL can feed ordering, selection, and expressions. - acceptOrderingTerm(SQL("")) - acceptSelectable(SQL("")) - acceptSpecificExpressible(SQL("")) - acceptExpressible(SQL("")) + acceptOrderingTerm_generic(SQL("")) + acceptSelectable_generic(SQL("")) + acceptSpecificExpressible_generic(SQL("")) + acceptExpressible_generic(SQL("")) + + acceptOrderingTerm_existential(SQL("")) + acceptSelectable_existential(SQL("")) + acceptSpecificExpressible_existential(SQL("")) + acceptExpressible_existential(SQL("")) // SQL can build complex expressions and orderings _ = SQL("") + 1 @@ -797,18 +844,24 @@ extension SQLLiteralTests { // Swift String literals are interpreted as String, even when SQL // is an accepted type. // - // should not compile: XCTAssertEqual(acceptOrderingTerm(""), String(describing: String.self)) - // should not compile: XCTAssertEqual(acceptSelectable(""), String(describing: String.self)) - // should not compile: XCTAssertEqual(acceptSpecificExpressible(""), String(describing: String.self)) - XCTAssertEqual(acceptExpressible(""), String(describing: String.self)) - + // should not compile: XCTAssertEqual(acceptOrderingTerm_generic(""), String(describing: String.self)) + // should not compile: XCTAssertEqual(acceptSelectable_generic(""), String(describing: String.self)) + // should not compile: XCTAssertEqual(acceptSpecificExpressible_generic(""), String(describing: String.self)) + XCTAssertEqual(acceptExpressible_generic(""), String(describing: String.self)) + // should not compile: XCTAssertEqual(acceptOrderingTerm_existential(""), String(describing: String.self)) + // should not compile: XCTAssertEqual(acceptSelectable_existential(""), String(describing: String.self)) + // should not compile: XCTAssertEqual(acceptSpecificExpressible_existential(""), String(describing: String.self)) + XCTAssertEqual(acceptExpressible_existential(""), String(describing: String.self)) + // When a literal can be interpreted as an ordering, a selection, or an // expression, then the expression interpretation is favored. // This test targets TableAlias subscript. 
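// (Why the expression interpretation wins: SQL conforms to
// SQLOrderingTerm, SQLSelectable, and SQLSpecificExpressible at once, so
// overload resolution favors the most constrained overload, the one
// requiring all three conformances, in both the generic and existential
// variants below.)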
// - // should not compile: XCTAssertEqual(overloaded(""), "a") - XCTAssertEqual(overloaded(SQL("")), "SQLSpecificExpressible") - + // should not compile: XCTAssertEqual(overloaded_generic(""), "a") + XCTAssertEqual(overloaded_generic(SQL("")), "SQLSpecificExpressible") + // should not compile: XCTAssertEqual(overloaded_existential(""), "a") + XCTAssertEqual(overloaded_existential(SQL("")), "SQLSpecificExpressible") + // In practice: try makeDatabaseQueue().write { db in struct Player: TableRecord { } @@ -835,33 +888,65 @@ extension SQLLiteralTests { // Support for testProtocolResolution() @discardableResult -private func acceptOrderingTerm(_ x: SQLOrderingTerm) -> String { +private func acceptOrderingTerm_generic(_ x: some SQLOrderingTerm) -> String { + String(describing: type(of: x)) +} + +@discardableResult +private func acceptSelectable_generic(_ x: some SQLSelectable) -> String { + String(describing: type(of: x)) +} + +@discardableResult +private func acceptSpecificExpressible_generic(_ x: some SQLSpecificExpressible) -> String { + String(describing: type(of: x)) +} + +@discardableResult +private func acceptExpressible_generic(_ x: some SQLExpressible) -> String { + String(describing: type(of: x)) +} + +private func overloaded_generic(_ x: some SQLOrderingTerm) -> String { + "SQLOrderingTerm" +} + +private func overloaded_generic(_ x: some SQLSelectable) -> String { + "SQLSelectable" +} + +private func overloaded_generic(_ x: some SQLSpecificExpressible & SQLSelectable & SQLOrderingTerm) -> String { + "SQLSpecificExpressible" +} + +@discardableResult +private func acceptOrderingTerm_existential(_ x: any SQLOrderingTerm) -> String { String(describing: type(of: x)) } @discardableResult -private func acceptSelectable(_ x: SQLSelectable) -> String { +private func acceptSelectable_existential(_ x: any SQLSelectable) -> String { String(describing: type(of: x)) } @discardableResult -private func acceptSpecificExpressible(_ x: SQLSpecificExpressible) -> String { +private func acceptSpecificExpressible_existential(_ x: any SQLSpecificExpressible) -> String { String(describing: type(of: x)) } @discardableResult -private func acceptExpressible(_ x: SQLExpressible) -> String { +private func acceptExpressible_existential(_ x: any SQLExpressible) -> String { String(describing: type(of: x)) } -private func overloaded(_ x: SQLOrderingTerm) -> String { +private func overloaded_existential(_ x: any SQLOrderingTerm) -> String { "SQLOrderingTerm" } -private func overloaded(_ x: SQLSelectable) -> String { +private func overloaded_existential(_ x: any SQLSelectable) -> String { "SQLSelectable" } -private func overloaded(_ x: SQLSpecificExpressible & SQLSelectable & SQLOrderingTerm) -> String { +private func overloaded_existential(_ x: any SQLSpecificExpressible & SQLSelectable & SQLOrderingTerm) -> String { "SQLSpecificExpressible" } diff --git a/Tests/GRDBTests/SelectStatementTests.swift b/Tests/GRDBTests/SelectStatementTests.swift index 959a1d4f43..b3263d651f 100644 --- a/Tests/GRDBTests/SelectStatementTests.swift +++ b/Tests/GRDBTests/SelectStatementTests.swift @@ -98,7 +98,7 @@ class SelectStatementTests : GRDBTestCase { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in let statement = try db.makeStatement(sql: "SELECT COUNT(*) FROM persons WHERE age < :age") - let ageDicts: [[String: DatabaseValueConvertible?]] = [["age": 20], ["age": 30], ["age": 40], ["age": 50]] + let ageDicts: [[String: (any DatabaseValueConvertible)?]] = [["age": 20], ["age": 30], ["age": 40], ["age": 50]] let 
counts = try ageDicts.map { dic -> Int in // Make sure we don't trigger a fallible initializer let arguments: StatementArguments = StatementArguments(dic) @@ -112,7 +112,7 @@ class SelectStatementTests : GRDBTestCase { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in let statement = try db.makeStatement(sql: "SELECT COUNT(*) FROM persons WHERE age < :age") - let ageDicts: [[String: DatabaseValueConvertible?]] = [["age": 20], ["age": 30], ["age": 40], ["age": 50]] + let ageDicts: [[String: (any DatabaseValueConvertible)?]] = [["age": 20], ["age": 30], ["age": 40], ["age": 50]] let counts = try ageDicts.map { ageDict -> Int in statement.arguments = StatementArguments(ageDict) return try Int.fetchOne(statement)! diff --git a/Tests/GRDBTests/SharedValueObservationTests.swift b/Tests/GRDBTests/SharedValueObservationTests.swift index e510fc73ee..ca53c61d98 100644 --- a/Tests/GRDBTests/SharedValueObservationTests.swift +++ b/Tests/GRDBTests/SharedValueObservationTests.swift @@ -104,7 +104,7 @@ class SharedValueObservationTests: GRDBTestCase { #if canImport(Combine) func test_immediate_publisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -381,7 +381,7 @@ class SharedValueObservationTests: GRDBTestCase { #if canImport(Combine) func test_async_publisher() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -511,7 +511,7 @@ class SharedValueObservationTests: GRDBTestCase { #if canImport(Combine) func test_error_recovery_observationLifetime() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -567,7 +567,7 @@ class SharedValueObservationTests: GRDBTestCase { #if canImport(Combine) func test_error_recovery_whileObserved() throws { - guard #available(OSX 10.15, iOS 13, tvOS 13, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Combine is not available") } @@ -621,7 +621,7 @@ class SharedValueObservationTests: GRDBTestCase { } #endif - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait() async throws { let dbQueue = try makeDatabaseQueue() try await dbQueue.write { db in diff --git a/Tests/GRDBTests/SingletonRecordTest.swift b/Tests/GRDBTests/SingletonRecordTest.swift index 13bf623840..a3fb0766ee 100644 --- a/Tests/GRDBTests/SingletonRecordTest.swift +++ b/Tests/GRDBTests/SingletonRecordTest.swift @@ -31,7 +31,7 @@ extension AppConfiguration: FetchableRecord, PersistableRecord { /// Returns the persisted configuration, or the default one if the /// database table is empty. - static func fetch(_ db: Database) throws -> AppConfiguration { + static func find(_ db: Database) throws -> AppConfiguration { try fetchOne(db) ?? 
.default } } @@ -54,7 +54,7 @@ class SingletonRecordTest: GRDBTestCase { // Given try createEmptyAppConfigurationTable(db) // When - let config = try AppConfiguration.fetch(db) + let config = try AppConfiguration.find(db) // Then XCTAssertEqual(config.text, "default") } @@ -66,7 +66,7 @@ class SingletonRecordTest: GRDBTestCase { try createEmptyAppConfigurationTable(db) try AppConfiguration(text: "initial").insert(db) // When - let config = try AppConfiguration.fetch(db) + let config = try AppConfiguration.find(db) // Then XCTAssertEqual(config.text, "initial") } @@ -79,7 +79,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").insert(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -93,7 +93,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").insert(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -106,7 +106,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").update(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -120,7 +120,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").update(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -131,12 +131,12 @@ class SingletonRecordTest: GRDBTestCase { // Given try createEmptyAppConfigurationTable(db) // When - var config = try AppConfiguration.fetch(db) + var config = try AppConfiguration.find(db) try config.updateChanges(db) { $0.text = "test" } // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -148,12 +148,12 @@ class SingletonRecordTest: GRDBTestCase { try createEmptyAppConfigurationTable(db) try AppConfiguration(text: "initial").insert(db) // When - var config = try AppConfiguration.fetch(db) + var config = try AppConfiguration.find(db) try config.updateChanges(db) { $0.text = "test" } // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -166,7 +166,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").save(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -180,7 +180,7 
@@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").save(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -192,7 +192,7 @@ class SingletonRecordTest: GRDBTestCase { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -203,7 +203,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").upsert(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } @@ -215,7 +215,7 @@ class SingletonRecordTest: GRDBTestCase { throw XCTSkip("UPSERT is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("UPSERT is not available") } #endif @@ -227,7 +227,7 @@ class SingletonRecordTest: GRDBTestCase { // When try AppConfiguration(text: "test").upsert(db) // Then - try XCTAssertEqual(AppConfiguration.fetch(db).text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) XCTAssertEqual(row, ["id": 1, "text": "test"]) } diff --git a/Tests/GRDBTests/SingletonUserDefaultsTest.swift b/Tests/GRDBTests/SingletonUserDefaultsTest.swift new file mode 100644 index 0000000000..7db9f80e31 --- /dev/null +++ b/Tests/GRDBTests/SingletonUserDefaultsTest.swift @@ -0,0 +1,174 @@ +import XCTest +import GRDB + +private struct AppConfiguration: Codable { + // Support for the single row guarantee + private var id = 1 + + // The stored properties + private var storedText: String? + // ... other properties + + // The public properties + var text: String { + get { storedText ?? "default" } + set { storedText = newValue } + } + + mutating func resetText() { + storedText = nil + } +} + +extension AppConfiguration { + /// The default configuration + static let `default` = AppConfiguration() +} + +// Database Access +extension AppConfiguration: FetchableRecord, PersistableRecord { + // Customize the default PersistableRecord behavior + func willUpdate(_ db: Database, columns: Set<String>) throws { + // Insert the default configuration if it does not exist yet. + if try !exists(db) { + try AppConfiguration.default.insert(db) + } + } + + /// Returns the persisted configuration, or the default one if the + /// database table is empty. + static func find(_ db: Database) throws -> AppConfiguration { + try fetchOne(db) ?? .default + } +} + +class SingletonUserDefaultsTest: GRDBTestCase { + private func createEmptyAppConfigurationTable(_ db: Database) throws { + // Table creation + try db.create(table: "appConfiguration") { t in + // Single row guarantee + t.primaryKey("id", .integer, onConflict: .replace).check { $0 == 1 } + + // The configuration columns + t.column("storedText", .text) + // ... 
other columns + } + } + + func test_find_in_empty_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + // When + let config = try AppConfiguration.find(db) + // Then + XCTAssertEqual(config.text, "default") + } + } + + func test_find_in_populated_database_null() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + try AppConfiguration().insert(db) + // When + let config = try AppConfiguration.find(db) + // Then + XCTAssertEqual(config.text, "default") + } + } + + func test_find_from_populated_database_not_null() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + try db.execute(sql: "INSERT INTO appConfiguration(storedText) VALUES ('initial')") + // When + let config = try AppConfiguration.find(db) + // Then + XCTAssertEqual(config.text, "initial") + } + } + + func test_save_in_empty_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + // When + var appConfiguration = try AppConfiguration.find(db) + appConfiguration.text = "test" + try appConfiguration.save(db) + // Then + try XCTAssertEqual(AppConfiguration.find(db).text, "test") + let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) + XCTAssertEqual(row, ["id": 1, "storedText": "test"]) + } + } + + func test_save_in_populated_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + try db.execute(sql: "INSERT INTO appConfiguration(storedText) VALUES ('initial')") + // When + var appConfiguration = try AppConfiguration.find(db) + appConfiguration.text = "test" + try appConfiguration.save(db) + // Then + try XCTAssertEqual(AppConfiguration.find(db).text, "test") + let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) + XCTAssertEqual(row, ["id": 1, "storedText": "test"]) + } + } + + func test_reset_and_save_in_populated_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + try db.execute(sql: "INSERT INTO appConfiguration(storedText) VALUES ('initial')") + // When + var appConfiguration = try AppConfiguration.find(db) + appConfiguration.resetText() + try appConfiguration.save(db) + // Then + try XCTAssertEqual(AppConfiguration.find(db).text, "default") + let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) + XCTAssertEqual(row, ["id": 1, "storedText": nil]) + } + } + + func test_update_changes_in_empty_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + // When + var config = try AppConfiguration.find(db) + try config.updateChanges(db) { + $0.text = "test" + } + // Then + XCTAssertEqual(config.text, "test") + try XCTAssertEqual(AppConfiguration.find(db).text, "test") + let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) + XCTAssertEqual(row, ["id": 1, "storedText": "test"]) + } + } + + func test_update_changes_in_populated_database() throws { + try makeDatabaseQueue().write { db in + // Given + try createEmptyAppConfigurationTable(db) + try db.execute(sql: "INSERT INTO appConfiguration(storedText) VALUES ('initial')") + // When + var config = try AppConfiguration.find(db) + try config.updateChanges(db) { + $0.text = "test" + } + // Then + XCTAssertEqual(config.text, "test") + try 
XCTAssertEqual(AppConfiguration.find(db).text, "test") + let row = try XCTUnwrap(Row.fetchOne(db, sql: "SELECT * FROM appConfiguration")) + XCTAssertEqual(row, ["id": 1, "storedText": "test"]) + } + } +} diff --git a/Tests/GRDBTests/StatementArguments+FoundationTests.swift b/Tests/GRDBTests/StatementArguments+FoundationTests.swift index 61ec4b075d..3d5094eb5f 100644 --- a/Tests/GRDBTests/StatementArguments+FoundationTests.swift +++ b/Tests/GRDBTests/StatementArguments+FoundationTests.swift @@ -47,14 +47,14 @@ class StatementArgumentsFoundationTests: GRDBTestCase { } } - func testStatementArgumentsNSArrayInitializerFromInvalidNSArray() { - let persons = [ // NSArray, because of the heterogeneous values - ["Arthur", NonDatabaseConvertibleObject()], - ["Barbara", NonDatabaseConvertibleObject()], - ] - - for person in persons { - XCTAssertNil(StatementArguments(person)) + func testStatementArgumentsInitializerFromInvalidArray() { + do { + let array: [Any] = ["Arthur", NonDatabaseConvertibleObject()] + XCTAssertNil(StatementArguments(array)) + } + do { + let array: NSArray = ["Arthur", NonDatabaseConvertibleObject()] + XCTAssertNil(StatementArguments(array as! [Any])) } } diff --git a/Tests/GRDBTests/StatementArgumentsTests.swift b/Tests/GRDBTests/StatementArgumentsTests.swift index de0ab55ae9..41f873d0ee 100644 --- a/Tests/GRDBTests/StatementArgumentsTests.swift +++ b/Tests/GRDBTests/StatementArgumentsTests.swift @@ -33,7 +33,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments([]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -42,7 +42,7 @@ { // Too few arguments try statement.validateArguments(["foo"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -51,7 +51,7 @@ { // Too many arguments try statement.validateArguments(["foo", 1, "bar"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -60,7 +60,7 @@ { // Missing arguments try statement.validateArguments([:]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -69,7 +69,7 @@ { // Unmappable arguments try statement.validateArguments(["firstName": "foo", "age": 1]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -81,7 +81,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments([name, age] as [DatabaseValueConvertible?]) + let arguments = StatementArguments([name, age] as [(any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (?, ?)") updateStatement.arguments = arguments @@ -101,7 +101,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments([name, age] as [DatabaseValueConvertible?]) + let arguments = StatementArguments([name, age] as [(any DatabaseValueConvertible)?]) let 
updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (?, ?)") try updateStatement.setArguments(arguments) @@ -117,7 +117,7 @@ class StatementArgumentsTests: GRDBTestCase { do { try updateStatement.setArguments([1]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(updateStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -126,7 +126,7 @@ class StatementArgumentsTests: GRDBTestCase { do { try selectStatement.setArguments([1]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(selectStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -139,7 +139,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments([name, age] as [DatabaseValueConvertible?]) + let arguments = StatementArguments([name, age] as [(any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (?, ?)") updateStatement.setUncheckedArguments(arguments) @@ -184,7 +184,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments([]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -193,7 +193,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments(["foo"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -202,7 +202,7 @@ class StatementArgumentsTests: GRDBTestCase { // Too many arguments try statement.validateArguments(["foo", 1, "baz"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -211,7 +211,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments([:]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -220,7 +220,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments(["firstName": "foo"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -232,7 +232,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (:name, :age)") updateStatement.arguments = arguments @@ -252,7 +252,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (:name, :age)") try 
updateStatement.setArguments(arguments) @@ -268,7 +268,7 @@ class StatementArgumentsTests: GRDBTestCase { do { try updateStatement.setArguments(["name": name]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(updateStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -277,7 +277,7 @@ class StatementArgumentsTests: GRDBTestCase { do { try selectStatement.setArguments(["name": name]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(selectStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -290,7 +290,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, age) VALUES (:name, :age)") updateStatement.setUncheckedArguments(arguments) @@ -343,7 +343,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments([]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -352,7 +352,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments(["foo"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -361,7 +361,7 @@ class StatementArgumentsTests: GRDBTestCase { // Too many arguments try statement.validateArguments(["foo", 1, "baz"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -370,7 +370,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments([:]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -379,7 +379,7 @@ class StatementArgumentsTests: GRDBTestCase { // Missing arguments try statement.validateArguments(["name": "foo"]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { } catch { XCTFail("Unexpected error: \(error)") } @@ -392,7 +392,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, lastName, age) VALUES (:name, :name, :age)") updateStatement.arguments = arguments @@ -412,7 +412,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, lastName, age) VALUES (:name, :name, :age)") try 
updateStatement.setArguments(arguments) @@ -428,7 +428,7 @@ do { try updateStatement.setArguments(["name": name]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(updateStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -437,7 +437,7 @@ do { try selectStatement.setArguments(["name": name]) XCTFail("Expected error") - } catch is DatabaseError { + } catch DatabaseError.SQLITE_MISUSE { XCTAssertEqual(selectStatement.arguments, arguments) } catch { XCTFail("Unexpected error: \(error)") @@ -450,7 +450,7 @@ class StatementArgumentsTests: GRDBTestCase { try dbQueue.inDatabase { db in let name = "Arthur" let age = 42 - let arguments = StatementArguments(["name": name, "age": age] as [String: DatabaseValueConvertible?]) + let arguments = StatementArguments(["name": name, "age": age] as [String: (any DatabaseValueConvertible)?]) let updateStatement = try db.makeStatement(sql: "INSERT INTO persons (firstName, lastName, age) VALUES (:name, :name, :age)") updateStatement.setUncheckedArguments(arguments) diff --git a/Tests/GRDBTests/TableDefinitionTests.swift b/Tests/GRDBTests/TableDefinitionTests.swift index ffefeb44ad..0092b26d79 100644 --- a/Tests/GRDBTests/TableDefinitionTests.swift +++ b/Tests/GRDBTests/TableDefinitionTests.swift @@ -362,7 +362,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("Generated columns are not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("Generated columns are not available") } #endif @@ -545,6 +545,33 @@ class TableDefinitionTests: GRDBTestCase { } } + @available(*, deprecated) + func testTableCheck_deprecated() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "test") { t in + // Deprecated because this does not do what the user means! 
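+ // (The pitfall: the string is interpreted as an SQL value, producing
+ // CHECK ('a < b'); SQLite coerces that constant string to 0, i.e. false,
+ // so every insert fails, as the sanity check below demonstrates. What
+ // the user likely means is t.check(sql: "a < b") or
+ // t.check(Column("a") < Column("b")), assuming GRDB's check(sql:) and
+ // expression-based check variants.)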
+ t.check("a < b") + t.column("a", .integer) + t.column("b", .integer) + } + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "test" (\ + "a" INTEGER, \ + "b" INTEGER, \ + CHECK ('a < b')\ + ) + """) + + // Sanity check: insert should fail because the 'a < b' string is false for SQLite + do { + try db.execute(sql: "INSERT INTO test (a, b) VALUES (0, 1)") + XCTFail() + } catch { + } + } + } + func testConstraintLiteral() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -651,11 +678,22 @@ class TableDefinitionTests: GRDBTestCase { try db.create(table: "test4") { t in t.column("parent", .integer).references("test4") } - assertEqualSQL( - lastSQLQuery!, - ("CREATE TABLE \"test4\" (" + - "\"parent\" INTEGER REFERENCES \"test4\"(\"rowid\")" + - ")") as String) + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "test4" (\ + "parent" INTEGER REFERENCES "test4"("rowid")\ + ) + """) + + try db.create(table: "test5") { t in + t.column("parent", .integer) + t.foreignKey(["parent"], references: "test5") + } + assertEqualSQL(lastSQLQuery!, """ + CREATE TABLE "test5" (\ + "parent" INTEGER, \ + FOREIGN KEY ("parent") REFERENCES "test5"("rowid")\ + ) + """) } } @@ -705,6 +743,40 @@ class TableDefinitionTests: GRDBTestCase { } } + func testAlterTableAddAutoReferencingForeignKey() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + do { + try db.create(table: "hiddenRowIdTable") { t in + t.column("a", .text) + } + + sqlQueries.removeAll() + try db.alter(table: "hiddenRowIdTable") { t in + t.add(column: "ref").references("hiddenRowIdTable") + } + XCTAssertEqual(lastSQLQuery, """ + ALTER TABLE "hiddenRowIdTable" ADD COLUMN "ref" REFERENCES "hiddenRowIdTable"("rowid") + """) + } + + do { + try db.create(table: "explicitPrimaryKey") { t in + t.primaryKey("code", .text) + t.column("a", .text) + } + + sqlQueries.removeAll() + try db.alter(table: "explicitPrimaryKey") { t in + t.add(column: "ref").references("explicitPrimaryKey") + } + XCTAssertEqual(lastSQLQuery, """ + ALTER TABLE "explicitPrimaryKey" ADD COLUMN "ref" REFERENCES "explicitPrimaryKey"("code") + """) + } + } + } + func testAlterTableAddColumnInvalidatesSchemaCache() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -725,7 +797,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("ALTER TABLE RENAME COLUMN is not available") } #if !GRDBCUSTOMSQLITE && !GRDBCIPHER - guard #available(iOS 13.0, tvOS 13.0, watchOS 6.0, *) else { + guard #available(iOS 13, tvOS 13, watchOS 6, *) else { throw XCTSkip("ALTER TABLE RENAME COLUMN is not available") } #endif @@ -753,7 +825,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("ALTER TABLE RENAME COLUMN is not available") } #if !GRDBCUSTOMSQLITE && !GRDBCIPHER - guard #available(iOS 13.0, tvOS 13.0, watchOS 6.0, *) else { + guard #available(iOS 13, tvOS 13, watchOS 6, *) else { throw XCTSkip("ALTER TABLE RENAME COLUMN is not available") } #endif @@ -777,7 +849,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("Generated columns are not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("Generated columns are not available") } #endif @@ -812,7 +884,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("ALTER TABLE DROP COLUMN is not available") } #if !GRDBCUSTOMSQLITE && !GRDBCIPHER - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + 
guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("ALTER TABLE DROP COLUMN is not available") } #endif @@ -836,7 +908,7 @@ class TableDefinitionTests: GRDBTestCase { throw XCTSkip("ALTER TABLE DROP COLUMN is not available") } #if !GRDBCUSTOMSQLITE && !GRDBCIPHER - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("ALTER TABLE DROP COLUMN is not available") } #endif @@ -889,8 +961,37 @@ class TableDefinitionTests: GRDBTestCase { try db.create(index: "test_on_a_b_2", on: "test", columns: ["a", "b"], options: [.unique, .ifNotExists]) assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"test_on_a_b_2\" ON \"test\"(\"a\", \"b\")") + try db.create(index: "test_on_a_plus_b", on: "test", expressions: [Column("a") + Column("b")], options: [.unique, .ifNotExists]) + assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"test_on_a_plus_b\" ON \"test\"(\"a\" + \"b\")") + + try db.create(index: "test_on_a_nocase", on: "test", expressions: [Column("a").collating(.nocase)], options: [.unique, .ifNotExists]) + assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"test_on_a_nocase\" ON \"test\"(\"a\" COLLATE NOCASE)") + + // Sanity check + XCTAssertEqual(try Set(db.indexes(on: "test").map(\.name)), ["test_on_a", "test_on_a_b", "test_on_a_b_2", "test_on_a_nocase"]) + } + } + + func testCreateIndexOn() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "test") { t in + t.primaryKey("id", .integer) + t.column("a", .text) + t.column("b", .text) + } + + try db.create(indexOn: "test", columns: ["a"]) + assertEqualSQL(lastSQLQuery!, "CREATE INDEX \"index_test_on_a\" ON \"test\"(\"a\")") + + try db.create(indexOn: "test", columns: ["a", "b"], options: [.unique]) + assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX \"index_test_on_a_b\" ON \"test\"(\"a\", \"b\")") + + try db.create(indexOn: "test", columns: ["b"], options: [.unique, .ifNotExists]) + assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"index_test_on_b\" ON \"test\"(\"b\")") + // Sanity check - XCTAssertEqual(try Set(db.indexes(on: "test").map(\.name)), ["test_on_a", "test_on_a_b", "test_on_a_b_2"]) + XCTAssertEqual(try Set(db.indexes(on: "test").map(\.name)), ["index_test_on_a", "index_test_on_a_b", "index_test_on_b"]) } } @@ -906,6 +1007,9 @@ class TableDefinitionTests: GRDBTestCase { try db.create(index: "test_on_a_b", on: "test", columns: ["a", "b"], options: [.unique, .ifNotExists], condition: Column("a") == 1) assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"test_on_a_b\" ON \"test\"(\"a\", \"b\") WHERE \"a\" = 1") + try db.create(index: "test_on_a_plus_b", on: "test", expressions: [Column("a") + Column("b")], options: [.unique, .ifNotExists], condition: Column("a") == 1) + assertEqualSQL(lastSQLQuery!, "CREATE UNIQUE INDEX IF NOT EXISTS \"test_on_a_plus_b\" ON \"test\"(\"a\" + \"b\") WHERE \"a\" = 1") + // Sanity check XCTAssertEqual(try Set(db.indexes(on: "test").map(\.name)), ["test_on_a_b"]) } @@ -928,6 +1032,51 @@ class TableDefinitionTests: GRDBTestCase { } } + func testDropIndexOn() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "test") { t in + t.primaryKey("id", .integer) + t.column("a", .text) + t.column("b", .text) + t.column("c", .text) + t.column("d", .text) + } + try db.create(index: "custom_name_a", on: "test", columns: 
["a"]) + try db.create(index: "custom_name_b", on: "test", columns: ["a", "b"]) + try db.create(indexOn: "test", columns: ["c"]) + try db.create(indexOn: "test", columns: ["c", "d"]) + + // Custom name + try db.drop(indexOn: "test", columns: ["a"]) + assertEqualSQL(lastSQLQuery!, "DROP INDEX \"custom_name_a\"") + + // Custom name, case insensitivity + try db.drop(indexOn: "TEST", columns: ["A", "B"]) + assertEqualSQL(lastSQLQuery!, "DROP INDEX \"custom_name_b\"") + + // Default name + try db.drop(indexOn: "test", columns: ["c"]) + assertEqualSQL(lastSQLQuery!, "DROP INDEX \"index_test_on_c\"") + + // Default name, case insensitivity + try db.drop(indexOn: "TEST", columns: ["C", "D"]) + assertEqualSQL(lastSQLQuery!, "DROP INDEX \"index_test_on_c_d\"") + + // Non existing index: no error + try db.drop(indexOn: "test", columns: ["a", "b", "c", "d"]) + + // Non existing table: error + do { + try db.drop(indexOn: "missing", columns: ["a"]) + XCTFail("Expected error") + } catch { } + + // Sanity check + XCTAssertTrue(try db.indexes(on: "test").isEmpty) + } + } + func testReindex() throws { let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -938,4 +1087,99 @@ class TableDefinitionTests: GRDBTestCase { assertEqualSQL(lastSQLQuery!, "REINDEX swiftLocalizedCompare") } } + + func testCreateView() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("name") + t.column("score") + } + + do { + let request = SQLRequest(literal: """ + SELECT * FROM player WHERE name = \("O'Brien") + """) + try db.create(view: "view1", as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE VIEW "view1" AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + + do { + let request = Table("player").filter(Column("name") == "O'Brien") + try db.create(view: "view2", as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE VIEW "view2" AS SELECT * FROM "player" WHERE "name" = 'O''Brien' + """) + } + + do { + let sql: SQL = """ + SELECT * FROM player WHERE name = \("O'Brien") + """ + try db.create(view: "view3", asLiteral: sql) + assertEqualSQL(lastSQLQuery!, """ + CREATE VIEW "view3" AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + } + } + + func testCreateViewOptions() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("name") + t.column("score") + } + + let request = SQLRequest(literal: """ + SELECT * FROM player WHERE name = \("O'Brien") + """) + + do { + try db.create(view: "view1", options: .ifNotExists, as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE VIEW IF NOT EXISTS "view1" AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + + do { + try db.create(view: "view2", options: .temporary, as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE TEMPORARY VIEW "view2" AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + + do { + try db.create(view: "view3", options: [.temporary, .ifNotExists], as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE TEMPORARY VIEW IF NOT EXISTS "view3" AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + + do { + try db.create(view: "view4", columns: ["a", "b", "c"], as: request) + assertEqualSQL(lastSQLQuery!, """ + CREATE VIEW "view4" ("a", "b", "c") AS SELECT * FROM player WHERE name = 'O''Brien' + """) + } + } + } + + func testDropView() throws { + let dbQueue = try 
makeDatabaseQueue() + try dbQueue.inDatabase { db in + try db.create(view: "test", as: SQLRequest(literal: "SELECT 'test', 42")) + XCTAssertTrue(try db.viewExists("test")) + XCTAssertEqual(try db.columns(in: "test").count, 2) + + try db.drop(view: "test") + assertEqualSQL(lastSQLQuery!, "DROP VIEW \"test\"") + XCTAssertFalse(try db.viewExists("test")) + } + } } diff --git a/Tests/GRDBTests/TableRecord+QueryInterfaceRequestTests.swift b/Tests/GRDBTests/TableRecord+QueryInterfaceRequestTests.swift index 19182f7aac..a141e18e91 100644 --- a/Tests/GRDBTests/TableRecord+QueryInterfaceRequestTests.swift +++ b/Tests/GRDBTests/TableRecord+QueryInterfaceRequestTests.swift @@ -259,7 +259,7 @@ class TableRecordQueryInterfaceRequestTests: GRDBTestCase { sql(dbQueue, Reader.order(Col.age.descNullsFirst)), "SELECT * FROM \"readers\" ORDER BY \"age\" DESC NULLS FIRST") #elseif !GRDBCIPHER - if #available(OSX 10.16, iOS 14, tvOS 14, watchOS 7, *) { + if #available(iOS 14, macOS 10.16, tvOS 14, watchOS 7, *) { XCTAssertEqual( sql(dbQueue, Reader.order(Col.age.ascNullsLast)), "SELECT * FROM \"readers\" ORDER BY \"age\" ASC NULLS LAST") @@ -357,7 +357,7 @@ class TableRecordQueryInterfaceRequestTests: GRDBTestCase { } func testExistsIdentifiable() throws { - guard #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) else { + guard #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) else { throw XCTSkip("Identifiable is not available") } diff --git a/Tests/GRDBTests/TableRecordDeleteTests.swift b/Tests/GRDBTests/TableRecordDeleteTests.swift index ee8f210113..88e9c81745 100644 --- a/Tests/GRDBTests/TableRecordDeleteTests.swift +++ b/Tests/GRDBTests/TableRecordDeleteTests.swift @@ -6,7 +6,7 @@ private struct Hacker : TableRecord { var id: Int64? // Optional } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Hacker: Identifiable { } private struct Person : Codable, PersistableRecord, FetchableRecord, Hashable { @@ -16,7 +16,7 @@ private struct Person : Codable, PersistableRecord, FetchableRecord, Hashable { var email: String } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Person: Identifiable { } private struct Citizenship : TableRecord { @@ -46,7 +46,7 @@ class TableRecordDeleteTests: GRDBTestCase { XCTAssertTrue(deleted) XCTAssertEqual(try Hacker.fetchCount(db), 0) - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try db.execute(sql: "INSERT INTO hackers (rowid, name) VALUES (?, ?)", arguments: [1, "Arthur"]) try XCTAssertFalse(Hacker.deleteOne(db, id: nil)) deleted = try Hacker.deleteOne(db, id: 1) @@ -62,7 +62,7 @@ class TableRecordDeleteTests: GRDBTestCase { XCTAssertEqual(deletedCount, 2) XCTAssertEqual(try Hacker.fetchCount(db), 1) - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try db.execute(sql: "INSERT INTO hackers (rowid, name) VALUES (?, ?)", arguments: [2, "Barbara"]) try db.execute(sql: "INSERT INTO hackers (rowid, name) VALUES (?, ?)", arguments: [3, "Craig"]) let deletedCount = try Hacker.deleteAll(db, ids: [2, 3, 4]) @@ -85,7 +85,7 @@ class TableRecordDeleteTests: GRDBTestCase { XCTAssertTrue(deleted) XCTAssertEqual(try Person.fetchCount(db), 0) - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try db.execute(sql: 
"INSERT INTO persons (id, name, email) VALUES (?, ?, ?)", arguments: [1, "Arthur", "arthur@example.com"]) deleted = try Person.deleteOne(db, id: 1) XCTAssertTrue(deleted) @@ -100,7 +100,7 @@ class TableRecordDeleteTests: GRDBTestCase { XCTAssertEqual(deletedCount, 2) XCTAssertEqual(try Person.fetchCount(db), 1) - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try db.execute(sql: "INSERT INTO persons (id, name, email) VALUES (?, ?, ?)", arguments: [2, "Barbara", "barbara@example.com"]) try db.execute(sql: "INSERT INTO persons (id, name, email) VALUES (?, ?, ?)", arguments: [3, "Craig", "craig@example.com"]) let deletedCount = try Person.deleteAll(db, ids: [2, 3, 4]) @@ -190,7 +190,7 @@ class TableRecordDeleteTests: GRDBTestCase { try Person.filter(keys: [1, 2]).deleteAll(db) XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" IN (1, 2)") - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try Person.filter(id: 1).deleteAll(db) XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" = 1") @@ -235,7 +235,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -255,7 +255,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -277,15 +277,23 @@ class TableRecordDeleteTests: GRDBTestCase { _ = try Person.filter(keys: [1, 2]).deleteAndFetchCursor(db).next() XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" IN (1, 2) RETURNING *") - - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + +#if GRDBCUSTOMSQLITE || GRDBCIPHER + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { _ = try Person.filter(id: 1).deleteAndFetchCursor(db).next() XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" = 1 RETURNING *") _ = try Person.filter(ids: [1, 2]).deleteAndFetchCursor(db).next() XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" IN (1, 2) RETURNING *") } - +#else + _ = try Person.filter(id: 1).deleteAndFetchCursor(db).next() + XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" = 1 RETURNING *") + + _ = try Person.filter(ids: [1, 2]).deleteAndFetchCursor(db).next() + XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE \"id\" IN (1, 2) RETURNING *") +#endif + _ = try Person.filter(sql: "id = 1").deleteAndFetchCursor(db).next() XCTAssertEqual(self.lastSQLQuery, "DELETE FROM \"persons\" WHERE id = 1 RETURNING *") @@ -308,7 +316,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -336,7 +344,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, 
macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -374,7 +382,7 @@ class TableRecordDeleteTests: GRDBTestCase { try db.create(table: "player") { t in t.autoIncrementedPrimaryKey("id") - t.column("teamId", .integer).references("team") + t.belongsTo("team") } do { @@ -422,7 +430,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -449,7 +457,7 @@ class TableRecordDeleteTests: GRDBTestCase { try db.create(table: "player") { t in t.autoIncrementedPrimaryKey("id") - t.column("teamId", .integer).references("team") + t.belongsTo("team") } do { @@ -589,7 +597,7 @@ class TableRecordDeleteTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif diff --git a/Tests/GRDBTests/TableRecordTests.swift b/Tests/GRDBTests/TableRecordTests.swift index d463ba2654..f59fcd1c5e 100644 --- a/Tests/GRDBTests/TableRecordTests.swift +++ b/Tests/GRDBTests/TableRecordTests.swift @@ -103,7 +103,7 @@ class TableRecordTests: GRDBTestCase { func testExtendedDatabaseSelection() throws { struct Record: TableRecord { static let databaseTableName = "t1" - static let databaseSelection: [SQLSelectable] = [AllColumns(), Column.rowID] + static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID] } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -116,7 +116,7 @@ class TableRecordTests: GRDBTestCase { func testRestrictedDatabaseSelection() throws { struct Record: TableRecord { static let databaseTableName = "t1" - static let databaseSelection: [SQLSelectable] = [Column("a"), Column("b")] + static let databaseSelection: [any SQLSelectable] = [Column("a"), Column("b")] } let dbQueue = try makeDatabaseQueue() try dbQueue.inDatabase { db in @@ -127,7 +127,7 @@ class TableRecordTests: GRDBTestCase { } func testRecordInAttachedDatabase() throws { - #if SQLITE_HAS_CODEC + #if GRDBCIPHER_USE_ENCRYPTION // Avoid error due to key not being provided: // file is not a database - while executing `ATTACH DATABASE...` throw XCTSkip("This test does not support encrypted databases") @@ -185,7 +185,7 @@ class TableRecordTests: GRDBTestCase { } func testCrossAttachedDatabaseAssociation() throws { - #if SQLITE_HAS_CODEC + #if GRDBCIPHER_USE_ENCRYPTION // Avoid error due to key not being provided: // file is not a database - while executing `ATTACH DATABASE...` throw XCTSkip("This test does not support encrypted databases") diff --git a/Tests/GRDBTests/TableRecordUpdateTests.swift b/Tests/GRDBTests/TableRecordUpdateTests.swift index eeabd3bfc2..b5de3fab7a 100644 --- a/Tests/GRDBTests/TableRecordUpdateTests.swift +++ b/Tests/GRDBTests/TableRecordUpdateTests.swift @@ -17,7 +17,7 @@ private struct Player: Codable, PersistableRecord, FetchableRecord, Hashable { } } -@available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) +@available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) extension Player: Identifiable { } private enum Columns: String, ColumnExpression { @@ -56,7 +56,7 @@ class TableRecordUpdateTests: GRDBTestCase { UPDATE 
"player" SET "score" = 0 WHERE "id" IN (1, 2) """) - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { try Player.filter(id: 1).updateAll(db, assignment) XCTAssertEqual(self.lastSQLQuery, """ UPDATE "player" SET "score" = 0 WHERE "id" = 1 @@ -123,7 +123,7 @@ class TableRecordUpdateTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -146,7 +146,7 @@ class TableRecordUpdateTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -173,7 +173,7 @@ class TableRecordUpdateTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -201,7 +201,7 @@ class TableRecordUpdateTests: GRDBTestCase { throw XCTSkip("RETURNING clause is not available") } #else - guard #available(iOS 15.0, tvOS 15.0, watchOS 8.0, macOS 12.0, *) else { + guard #available(iOS 15, macOS 12, tvOS 15, watchOS 8, *) else { throw XCTSkip("RETURNING clause is not available") } #endif @@ -347,6 +347,110 @@ class TableRecordUpdateTests: GRDBTestCase { } } + func testAssignmentBitwiseAndAssign() throws { + try makeDatabaseQueue().write { db in + try Player.createTable(db) + + try Player.updateAll(db, Columns.score &= 1) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" & 1 + """) + + try Player.updateAll(db, Columns.score &= Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" & "bonus" + """) + + try Player.updateAll(db, Columns.score &= -Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" & (-"bonus") + """) + + try Player.updateAll(db, Columns.score &= Columns.bonus * 2) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" & ("bonus" * 2) + """) + } + } + + func testAssignmentBitwiseOrAssign() throws { + try makeDatabaseQueue().write { db in + try Player.createTable(db) + + try Player.updateAll(db, Columns.score |= 1) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" | 1 + """) + + try Player.updateAll(db, Columns.score |= Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" | "bonus" + """) + + try Player.updateAll(db, Columns.score |= -Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" | (-"bonus") + """) + + try Player.updateAll(db, Columns.score |= Columns.bonus * 2) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" | ("bonus" * 2) + """) + } + } + + func testAssignmentLeftShiftAssign() throws { + try makeDatabaseQueue().write { db in + try Player.createTable(db) + + try Player.updateAll(db, Columns.score <<= 1) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" << 1 + """) + + try Player.updateAll(db, Columns.score <<= Columns.bonus) + 
XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" << "bonus" + """) + + try Player.updateAll(db, Columns.score <<= -Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" << (-"bonus") + """) + + try Player.updateAll(db, Columns.score <<= Columns.bonus * 2) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" << ("bonus" * 2) + """) + } + } + + func testAssignmentRightShiftAssign() throws { + try makeDatabaseQueue().write { db in + try Player.createTable(db) + + try Player.updateAll(db, Columns.score >>= 1) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" >> 1 + """) + + try Player.updateAll(db, Columns.score >>= Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" >> "bonus" + """) + + try Player.updateAll(db, Columns.score >>= -Columns.bonus) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" >> (-"bonus") + """) + + try Player.updateAll(db, Columns.score >>= Columns.bonus * 2) + XCTAssertEqual(self.lastSQLQuery, """ + UPDATE "player" SET "score" = "score" >> ("bonus" * 2) + """) + } + } + func testMultipleAssignments() throws { try makeDatabaseQueue().write { db in try Player.createTable(db) @@ -570,7 +674,7 @@ class TableRecordUpdateTests: GRDBTestCase { try db.create(table: "player") { t in t.autoIncrementedPrimaryKey("id") - t.column("teamId", .integer).references("team") + t.belongsTo("team") t.column("score", .integer) } diff --git a/Tests/GRDBTests/TableTests.swift b/Tests/GRDBTests/TableTests.swift index 6b67e80924..46c8f5a192 100644 --- a/Tests/GRDBTests/TableTests.swift +++ b/Tests/GRDBTests/TableTests.swift @@ -117,7 +117,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { struct Player: Identifiable { var id: Int64 } let t = Table("player") @@ -129,7 +129,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { struct Player: Identifiable { var id: Int64? } let t = Table("player") @@ -806,7 +806,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { // Non-optional ID struct Country: Identifiable { var id: String } @@ -821,7 +821,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { // Optional ID struct Country: Identifiable { var id: String? } @@ -920,7 +920,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { // Non-optional ID struct Country: Identifiable { var id: String } @@ -930,7 +930,7 @@ class TableTests: GRDBTestCase { """) } - if #available(OSX 10.15, iOS 13.0, tvOS 13.0, watchOS 6, *) { + if #available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) { // Optional ID struct Country: Identifiable { var id: String? 
} diff --git a/Tests/GRDBTests/TransactionDateTests.swift b/Tests/GRDBTests/TransactionDateTests.swift new file mode 100644 index 0000000000..5f2707223e --- /dev/null +++ b/Tests/GRDBTests/TransactionDateTests.swift @@ -0,0 +1,486 @@ +import XCTest +import GRDB + +class TransactionDateTests: GRDBTestCase { + func testTransactionDateOutsideOfTransaction() throws { + let dates = [ + Date.distantPast, + Date(), + Date.distantFuture, + ] + var dateIterator = dates.makeIterator() + dbConfiguration.transactionClock = .custom { _ in + dateIterator.next()! + } + + var collectedDates: [Date] = [] + try makeDatabaseQueue().inDatabase { db in + try collectedDates.append(db.transactionDate) + try collectedDates.append(db.transactionDate) + try collectedDates.append(db.transactionDate) + } + XCTAssertEqual(collectedDates, dates) + } + + func testTransactionDateInsideTransaction_commit() throws { + let dates = [ + Date.distantPast, + Date(), + Date.distantFuture, + ] + var dateIterator = dates.makeIterator() + dbConfiguration.transactionClock = .custom { _ in + dateIterator.next()! + } + + var collectedDates: [Date] = [] + try makeDatabaseQueue().inDatabase { db in + try collectedDates.append(db.transactionDate) + try db.execute(sql: "BEGIN") + try collectedDates.append(db.transactionDate) + try collectedDates.append(db.transactionDate) + try db.execute(sql: "COMMIT") + try collectedDates.append(db.transactionDate) + } + XCTAssertEqual(collectedDates, [dates[0], dates[1], dates[1], dates[2]]) + } + + func testTransactionDateInsideTransaction_rollback() throws { + let dates = [ + Date.distantPast, + Date(), + Date.distantFuture, + ] + var dateIterator = dates.makeIterator() + dbConfiguration.transactionClock = .custom { _ in + dateIterator.next()! + } + + var collectedDates: [Date] = [] + try makeDatabaseQueue().inDatabase { db in + try collectedDates.append(db.transactionDate) + try db.execute(sql: "BEGIN") + try collectedDates.append(db.transactionDate) + try collectedDates.append(db.transactionDate) + try db.execute(sql: "ROLLBACK") + try collectedDates.append(db.transactionDate) + } + XCTAssertEqual(collectedDates, [dates[0], dates[1], dates[1], dates[2]]) + } + + func testTransactionDateInsideTransaction_rollbackingError() throws { + let dates = [ + Date.distantPast, + Date(), + Date.distantFuture, + ] + var dateIterator = dates.makeIterator() + dbConfiguration.transactionClock = .custom { _ in + dateIterator.next()! + } + + var collectedDates: [Date] = [] + try makeDatabaseQueue().inDatabase { db in + try collectedDates.append(db.transactionDate) + try db.execute(sql: "BEGIN") + try collectedDates.append(db.transactionDate) + try collectedDates.append(db.transactionDate) + try? db.execute(sql: """ + CREATE TABLE t(id INTEGER PRIMARY KEY ON CONFLICT ROLLBACK); + INSERT INTO t VALUES (1); + INSERT INTO t VALUES (1); -- fails and rollbacks + """) + try collectedDates.append(db.transactionDate) + } + XCTAssertEqual(collectedDates, [dates[0], dates[1], dates[1], dates[2]]) + } + + func test_TimestampedRecord_default_willInsert() throws { + struct Player: Codable, MutablePersistableRecord, FetchableRecord, TimestampedRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? 
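Note on the `TransactionDateTests` above: they specify that `Database.transactionDate` is evaluated once per transaction (and once per implicit statement transaction outside of `BEGIN`/`COMMIT`), whether the transaction commits or rolls back. A minimal sketch of the configuration hook the tests rely on; the file path is illustrative:

```swift
import GRDB

// A custom clock makes transaction dates deterministic in tests.
// The closure runs once per transaction, not once per access.
var config = Configuration()
config.transactionClock = .custom { _ in Date(timeIntervalSince1970: 0) }

let dbQueue = try DatabaseQueue(path: "/tmp/demo.sqlite", configuration: config)
try dbQueue.write { db in
    let first = try db.transactionDate
    let second = try db.transactionDate
    assert(first == second) // same date for the whole write transaction
}
```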
+ var name: String + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + } + + currentDate = Date.distantPast + try dbQueue.write { db in + do { + var player = Player(name: "Arthur") + try player.insert(db) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, .distantPast) + } + + do { + let customDate = Date() + var player = Player(name: "Arthur") + player.creationDate = customDate + player.modificationDate = customDate + try player.insert(db) + XCTAssertEqual(player.creationDate, customDate) + XCTAssertEqual(player.modificationDate, customDate) + } + } + } + + func test_TimestampedRecord_updateWithTimestamp() throws { + struct Player: Codable, MutablePersistableRecord, FetchableRecord, TimestampedRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? + var name: String + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + } + + currentDate = Date.distantPast + try dbQueue.write { db in + var player = Player(name: "Arthur") + try player.insert(db) + } + + let newTransactionDate = Date() + currentDate = newTransactionDate + try dbQueue.write { db in + var player = try Player.find(db, key: 1) + + player.name = "Barbara" + try player.updateWithTimestamp(db) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, newTransactionDate) + + try player.updateWithTimestamp(db, modificationDate: .distantFuture) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, .distantFuture) + } + } + + func test_TimestampedRecord_updateChangesWithTimestamp() throws { + struct Player: Codable, MutablePersistableRecord, FetchableRecord, TimestampedRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? 
+ var name: String + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + } + + currentDate = Date.distantPast + try dbQueue.write { db in + var player = Player(name: "Arthur") + try player.insert(db) + } + + let newTransactionDate = Date() + currentDate = newTransactionDate + try dbQueue.write { db in + var player = try Player.find(db, key: 1) + + let changed = try player.updateChangesWithTimestamp(db) { + $0.name = "Barbara" + } + XCTAssertTrue(changed) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, newTransactionDate) + } + + try dbQueue.write { db in + var player = try Player.find(db, key: 1) + + let changed = try player.updateChangesWithTimestamp(db) { + $0.name = "Barbara" + } + XCTAssertFalse(changed) + } + } + + func test_TimestampedRecord_touch() throws { + struct Player: Codable, MutablePersistableRecord, FetchableRecord, TimestampedRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? + var name: String + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + + var player = Player(name: "Arthur") + try player.insert(db) + } + + let newTransactionDate = Date() + currentDate = newTransactionDate + try dbQueue.write { db in + var player = try Player.find(db, key: 1) + try player.touch(db) + XCTAssertEqual(player.modificationDate, newTransactionDate) + + try player.touch(db, modificationDate: .distantFuture) + XCTAssertEqual(player.modificationDate, .distantFuture) + } + } + + func test_TimestampedRecord_struct_with_customized_willInsert() throws { + struct Player: Codable, TimestampedRecord, FetchableRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? 
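The tests in this group all follow the same adoption pattern for the `TimestampedRecord` protocol defined at the bottom of this file. A minimal sketch of a conforming type (the `Note` type and its `note` table, with `id`, `creationDate`, `modificationDate` and `content` columns, are assumptions for illustration):

```swift
import GRDB

// Nullable dates let the default willInsert fill them in
// from db.transactionDate on first insertion.
struct Note: Codable, MutablePersistableRecord, FetchableRecord, TimestampedRecord {
    var id: Int64?
    var creationDate: Date?
    var modificationDate: Date?
    var content: String

    mutating func didInsert(_ inserted: InsertionSuccess) {
        id = inserted.rowID
    }
}

try dbQueue.write { db in
    var note = Note(content: "Hello")
    try note.insert(db)              // fills both timestamps
    note.content = "Hello again"
    try note.updateWithTimestamp(db) // refreshes modificationDate only
}
```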
+ var name: String + var isInserted = false // transient + + enum CodingKeys: String, CodingKey { + case id + case creationDate + case modificationDate + case name + } + + mutating func willInsert(_ db: Database) throws { + isInserted = true + try initializeTimestamps(db) + } + + mutating func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + } + + currentDate = Date.distantPast + try dbQueue.write { db in + var player = Player(name: "Arthur", isInserted: false) + try player.insert(db) + XCTAssertTrue(player.isInserted) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, .distantPast) + } + } + + func test_TimestampedRecord_class_with_non_mutating_willInsert() throws { + class Player: Codable, TimestampedRecord, PersistableRecord, FetchableRecord { + var id: Int64? + var creationDate: Date? + var modificationDate: Date? + var name: String + + init(id: Int64? = nil, creationDate: Date? = nil, modificationDate: Date? = nil, name: String) { + self.id = id + self.creationDate = creationDate + self.modificationDate = modificationDate + self.name = name + } + + func willInsert(_ db: Database) throws { + // Can't call initializeTimestamps because it is mutating + if creationDate == nil { + creationDate = try db.transactionDate + } + if modificationDate == nil { + modificationDate = try db.transactionDate + } + } + + func didInsert(_ inserted: InsertionSuccess) { + id = inserted.rowID + } + } + + var currentDate = Date.distantPast + dbConfiguration.transactionClock = .custom { _ in currentDate } + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "player") { t in + t.autoIncrementedPrimaryKey("id") + t.column("creationDate", .datetime).notNull() + t.column("modificationDate", .datetime).notNull() + t.column("name", .text).notNull() + } + } + + currentDate = Date.distantPast + try dbQueue.write { db in + let player = Player(name: "Arthur") + try player.insert(db) + XCTAssertEqual(player.creationDate, .distantPast) + XCTAssertEqual(player.modificationDate, .distantPast) + } + } +} + +// The protocol in RecordTimestamps.md + +/// A record type that tracks its creation and modification dates. See +/// +protocol TimestampedRecord: MutablePersistableRecord { + var creationDate: Date? { get set } + var modificationDate: Date? { get set } +} + +extension TimestampedRecord { + /// By default, `TimestampedRecord` types set `creationDate` and + /// `modificationDate` to the transaction date, if they are nil, + /// before insertion. + /// + /// `TimestampedRecord` types that customize the `willInsert` + /// persistence callback should call `initializeTimestamps` from + /// their implementation. + mutating func willInsert(_ db: Database) throws { + try initializeTimestamps(db) + } + + /// Sets `creationDate` and `modificationDate` to the transaction date, + /// if they are nil. + /// + /// It is called automatically before insertion, if your type does not + /// customize the `willInsert` persistence callback. If you customize + /// this callback, call `initializeTimestamps` from your implementation. 
+ mutating func initializeTimestamps(_ db: Database) throws { + if creationDate == nil { + creationDate = try db.transactionDate + } + if modificationDate == nil { + modificationDate = try db.transactionDate + } + } + + /// Sets `modificationDate`, and executes an `UPDATE` statement + /// on all columns. + /// + /// - parameter modificationDate: The modification date. If nil, the + /// transaction date is used. + mutating func updateWithTimestamp(_ db: Database, modificationDate: Date? = nil) throws { + self.modificationDate = try modificationDate ?? db.transactionDate + try update(db) + } + + /// Modifies the record according to the provided `modify` closure, and, + /// if and only if the record was modified, sets `modificationDate` and + /// executes an `UPDATE` statement that updates the modified columns. + /// + /// For example: + /// + /// ```swift + /// try dbQueue.write { db in + /// var player = Player.find(db, id: 1) + /// let modified = try player.updateChangesWithTimestamp(db) { + /// $0.score = 1000 + /// } + /// if modified { + /// print("player was modified") + /// } else { + /// print("player was not modified") + /// } + /// } + /// ``` + /// + /// - parameters: + /// - db: A database connection. + /// - modificationDate: The modification date. If nil, the + /// transaction date is used. + /// - modify: A closure that modifies the record. + /// - returns: Whether the record was changed and updated. + @discardableResult + mutating func updateChangesWithTimestamp( + _ db: Database, + modificationDate: Date? = nil, + modify: (inout Self) -> Void) + throws -> Bool + { + // Grab the changes performed by `modify` + let initialChanges = try databaseChanges(modify: modify) + if initialChanges.isEmpty { + return false + } + + // Update modification date and grab its column name + let dateChanges = try databaseChanges(modify: { + $0.modificationDate = try modificationDate ?? db.transactionDate + }) + + // Update the modified columns + let modifiedColumns = Set(initialChanges.keys).union(dateChanges.keys) + try update(db, columns: modifiedColumns) + return true + } + + /// Sets `modificationDate`, and executes an `UPDATE` statement that + /// updates the `modificationDate` column, if and only if the record + /// was modified. + /// + /// - parameter modificationDate: The modification date. If nil, the + /// transaction date is used. + mutating func touch(_ db: Database, modificationDate: Date? = nil) throws { + try updateChanges(db) { + $0.modificationDate = try modificationDate ?? db.transactionDate + } + } +} diff --git a/Tests/GRDBTests/TransactionObserverSavepointsTests.swift b/Tests/GRDBTests/TransactionObserverSavepointsTests.swift index fd700a450a..b0df5932b7 100644 --- a/Tests/GRDBTests/TransactionObserverSavepointsTests.swift +++ b/Tests/GRDBTests/TransactionObserverSavepointsTests.swift @@ -40,16 +40,16 @@ class TransactionObserverSavepointsTests: GRDBTestCase { private func match(preUpdateEvent event: DatabasePreUpdateEvent, kind: DatabasePreUpdateEvent.Kind, tableName: String, initialRowID: Int64?, finalRowID: Int64?, initialValues: [DatabaseValue]?, finalValues: [DatabaseValue]?, depth: CInt = 0) -> Bool { func check(_ dbValues: [DatabaseValue]?, expected: [DatabaseValue]?) 
-> Bool { - if let dbValues = dbValues { - guard let expected = expected else { return false } + if let dbValues { + guard let expected else { return false } return dbValues == expected } else { return expected == nil } } var count : Int = 0 - if let initialValues = initialValues { count = initialValues.count } - if let finalValues = finalValues { count = max(count, finalValues.count) } + if let initialValues { count = initialValues.count } + if let finalValues { count = max(count, finalValues.count) } guard (event.kind == kind) else { return false } guard (event.tableName == tableName) else { return false } diff --git a/Tests/GRDBTests/TransactionObserverTests.swift b/Tests/GRDBTests/TransactionObserverTests.swift index 446927342b..0730f42f29 100644 --- a/Tests/GRDBTests/TransactionObserverTests.swift +++ b/Tests/GRDBTests/TransactionObserverTests.swift @@ -16,18 +16,20 @@ private class Observer : TransactionObserver { } deinit { - if let deinitBlock = deinitBlock { + if let deinitBlock { deinitBlock() } } var didChangeCount: Int = 0 + var didChangeWithEventCount: Int = 0 var willCommitCount: Int = 0 var didCommitCount: Int = 0 var didRollbackCount: Int = 0 func resetCounts() { didChangeCount = 0 + didChangeWithEventCount = 0 willCommitCount = 0 didCommitCount = 0 didRollbackCount = 0 @@ -50,14 +52,18 @@ private class Observer : TransactionObserver { observesBlock(eventKind) } - func databaseDidChange(with event: DatabaseEvent) { + func databaseDidChange() { didChangeCount += 1 + } + + func databaseDidChange(with event: DatabaseEvent) { + didChangeWithEventCount += 1 events.append(event.copy()) } func databaseWillCommit() throws { willCommitCount += 1 - if let commitError = commitError { + if let commitError { throw commitError } } @@ -121,7 +127,7 @@ extension Artwork : FetchableRecord, PersistableRecord { } class TransactionObserverTests: GRDBTestCase { - private func setupArtistDatabase(in dbWriter: DatabaseWriter) throws { + private func setupArtistDatabase(in dbWriter: some DatabaseWriter) throws { try dbWriter.write { db in try db.execute(sql: """ CREATE TABLE artists ( @@ -144,16 +150,16 @@ class TransactionObserverTests: GRDBTestCase { private func match(preUpdateEvent event: DatabasePreUpdateEvent, kind: DatabasePreUpdateEvent.Kind, tableName: String, initialRowID: Int64?, finalRowID: Int64?, initialValues: [DatabaseValue]?, finalValues: [DatabaseValue]?, depth: CInt = 0) -> Bool { func check(_ dbValues: [DatabaseValue]?, expected: [DatabaseValue]?) 
-> Bool { - if let dbValues = dbValues { - guard let expected = expected else { return false } + if let dbValues { + guard let expected else { return false } return dbValues == expected } else { return expected == nil } } var count : Int = 0 - if let initialValues = initialValues { count = initialValues.count } - if let finalValues = finalValues { count = max(count, finalValues.count) } + if let initialValues { count = initialValues.count } + if let finalValues { count = max(count, finalValues.count) } guard (event.kind == kind) else { return false } guard (event.tableName == tableName) else { return false } @@ -796,7 +802,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -826,7 +833,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 3) // 3 deletes #endif - XCTAssertEqual(observer.didChangeCount, 3) // 3 deletes + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 3) // 3 deletes XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -894,7 +902,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -907,7 +916,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -917,7 +927,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -929,6 +940,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1003,7 +1015,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 3) // 3 deletes #endif - XCTAssertEqual(observer.didChangeCount, 3) // 3 deletes + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 3) // 3 deletes XCTAssertEqual(observer.willCommitCount, 0) 
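The assertion changes above reflect a split of the former single callback: `databaseDidChange(with:)` keeps reporting row-level events, while the new parameterless `databaseDidChange()` reports "unspecified" changes that carry no event, such as those signalled by `db.notifyChanges(in:)`. A minimal observer sketch distinguishing the two (logging is illustrative):

```swift
import GRDB

final class ChangeLogger: TransactionObserver {
    func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { true }

    // Unspecified change: something in an observed region changed,
    // but no DatabaseEvent is available.
    func databaseDidChange() {
        print("unspecified change in an observed region")
    }

    // Row-level change, with table name and event kind.
    func databaseDidChange(with event: DatabaseEvent) {
        print("\(event.kind) in \(event.tableName)")
    }

    func databaseDidCommit(_ db: Database) { print("commit") }
    func databaseDidRollback(_ db: Database) { print("rollback") }
}

let logger = ChangeLogger()
dbQueue.add(transactionObserver: logger, extent: .databaseLifetime)
```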
XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1015,6 +1028,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1133,6 +1147,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 1) @@ -1155,6 +1170,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1168,6 +1184,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1192,6 +1209,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1206,6 +1224,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 1) @@ -1229,7 +1248,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 1) @@ -1260,7 +1280,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 1) @@ -1285,6 +1306,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1298,6 +1320,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + 
XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1323,6 +1346,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1337,6 +1361,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 1) @@ -1369,7 +1394,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1479,6 +1505,7 @@ class TransactionObserverTests: GRDBTestCase { XCTAssertEqual(observer.willChangeCount, 0) #endif XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1504,6 +1531,7 @@ class TransactionObserverTests: GRDBTestCase { } XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1522,7 +1550,8 @@ class TransactionObserverTests: GRDBTestCase { return .commit } - XCTAssertEqual(observer.didChangeCount, 3) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 3) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1550,7 +1579,8 @@ class TransactionObserverTests: GRDBTestCase { return .commit } - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1578,7 +1608,8 @@ class TransactionObserverTests: GRDBTestCase { return .commit } - XCTAssertEqual(observer.didChangeCount, 2) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 2) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1613,7 +1644,8 @@ class TransactionObserverTests: GRDBTestCase { return .commit } - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1646,7 +1678,8 @@ class TransactionObserverTests: GRDBTestCase { return .commit } - XCTAssertEqual(observer.didChangeCount, 1) + 
XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1660,12 +1693,14 @@ class TransactionObserverTests: GRDBTestCase { class Observer: TransactionObserver { var didChangeCount: Int = 0 + var didChangeWithEventCount: Int = 0 var willCommitCount: Int = 0 var didCommitCount: Int = 0 var didRollbackCount: Int = 0 func resetCounts() { didChangeCount = 0 + didChangeWithEventCount = 0 willCommitCount = 0 didCommitCount = 0 didRollbackCount = 0 @@ -1681,8 +1716,12 @@ class TransactionObserverTests: GRDBTestCase { func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { true } - func databaseDidChange(with event: DatabaseEvent) { + func databaseDidChange() { didChangeCount += 1 + } + + func databaseDidChange(with event: DatabaseEvent) { + didChangeWithEventCount += 1 if event.tableName == "ignore" { stopObservingDatabaseChangesUntilNextTransaction() } @@ -1720,7 +1759,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 2) #endif - XCTAssertEqual(observer.didChangeCount, 2) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 2) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1739,7 +1779,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 1) #endif - XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 1) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1758,7 +1799,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 2) #endif - XCTAssertEqual(observer.didChangeCount, 2) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 2) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1777,7 +1819,8 @@ class TransactionObserverTests: GRDBTestCase { #if SQLITE_ENABLE_PREUPDATE_HOOK XCTAssertEqual(observer.willChangeCount, 3) #endif - XCTAssertEqual(observer.didChangeCount, 3) + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 3) XCTAssertEqual(observer.willCommitCount, 1) XCTAssertEqual(observer.didCommitCount, 1) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1785,6 +1828,687 @@ class TransactionObserverTests: GRDBTestCase { } } + // MARK: - Unspecified changes + + func testUnspecifiedChangeInFullDatabase() throws { + let dbQueue = try makeDatabaseQueue() + try setupArtistDatabase(in: dbQueue) + + do { + let observer = Observer(observes: { _ in true }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { _ in false }) + 
dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return true + case .update: + return false + case .delete: + return false + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return false + case .update: + return true + case .delete: + return false + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return false + case .update: + return false + case .delete: + return true + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "artists" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "non_existing" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChangeInEmptyRegion() throws { + let dbQueue = try makeDatabaseQueue() + try setupArtistDatabase(in: dbQueue) + + let observer = Observer(observes: { _ in true }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try 
db.notifyChanges(in: DatabaseRegion()) + } + + // No change detected because changed region is empty + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + func testUnspecifiedChangeInEmptyDatabase() throws { + let dbQueue = try makeDatabaseQueue() + + let observer = Observer(observes: { _ in true }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + // No change detected because there is no table + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + func testUnspecifiedChange_sqlite_master() throws { + do { + let dbQueue = try makeDatabaseQueue() + let observer = Observer(observes: { eventKind in + eventKind.tableName == "sqlite_master" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + // Undetected because the full database region does not include sqlite_master + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let dbQueue = try makeDatabaseQueue() + let observer = Observer(observes: { eventKind in + eventKind.tableName == "sqlite_master" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("sqlite_master")) + } + + // Detected because explicit sqlite_master region + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChange_sqlite_temp_master() throws { + do { + let dbQueue = try makeDatabaseQueue() + let observer = Observer(observes: { eventKind in + eventKind.tableName == "sqlite_temp_master" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("sqlite_temp_master")) + } + + // Undetected because there is no temp schema + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let dbQueue = try makeDatabaseQueue() + let observer = Observer(observes: { eventKind in + eventKind.tableName == "sqlite_temp_master" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + // Create temp schema + try db.execute(sql: "CREATE TEMPORARY TABLE t(a)") + + // Explicit sqlite_temp_master + try db.notifyChanges(in: Table("sqlite_temp_master")) + } + + // Detected because the temp schema exists. 
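The tests in this group exercise the new `Database.notifyChanges(in:)`, which lets application code report changes that SQLite itself cannot observe (for instance, data rewritten through a side channel). A minimal sketch of the two notification shapes being tested:

```swift
import GRDB

try dbQueue.write { db in
    // Wake observers of any table in the database:
    try db.notifyChanges(in: .fullDatabase)

    // Or only observers whose observed region intersects one table:
    try db.notifyChanges(in: Table("artists"))
}
```

Observers are notified through the parameterless `databaseDidChange()`, so `lastCommittedEvents` stays empty, which is exactly what these assertions check.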
+ XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChangeToTable() throws { + let dbQueue = try makeDatabaseQueue() + try setupArtistDatabase(in: dbQueue) + + do { + let observer = Observer(observes: { _ in true }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { _ in false }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return true + case .update: + return false + case .delete: + return false + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return false + case .update: + return true + case .delete: + return false + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + switch eventKind { + case .insert: + return false + case .update: + return false + case .delete: + return true + } + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "artists" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + 
XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "artists" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + // Case insensitivity (observer has to use the canonical name). + try db.notifyChanges(in: Table("ARTISTS")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "artworks" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("artists")) + } + + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChangeToColumn() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "test") { t in + t.autoIncrementedPrimaryKey("id") + t.column("a") + t.column("b") + } + } + + do { + let observer = Observer(observes: { eventKind in + if case .update("test", let columns) = eventKind, columns.contains("a") { + return true + } + return false + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + if case .update("test", let columns) = eventKind, columns.contains("a") { + return true + } + return false + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("test")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + if case .update("test", let columns) = eventKind, columns.contains("a") { + return true + } + return false + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("test").select(Column("a"))) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + if case .update("test", let columns) = eventKind, columns.contains("a") { + return true + } + return false + }) + 
dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + // Case insensitivity + try db.notifyChanges(in: Table("TEST").select(Column("A"))) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + if case .update("test", let columns) = eventKind, columns.contains("a") { + return true + } + return false + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("test").select(Column("b"))) + } + + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChangeToTemporaryTable() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.create(table: "test", options: .temporary) { t in + t.autoIncrementedPrimaryKey("id") + } + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "test" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: .fullDatabase) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + do { + let observer = Observer(observes: { eventKind in + eventKind.tableName == "test" + }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.write { db in + try db.notifyChanges(in: Table("test")) + } + + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + } + + func testUnspecifiedChangeFromReadOnlyAccess() throws { + let dbQueue = try makeDatabaseQueue() + try setupArtistDatabase(in: dbQueue) + + let observer = Observer(observes: { _ in true }) + dbQueue.add(transactionObserver: observer) + + try dbQueue.read { db in + try db.notifyChanges(in: .fullDatabase) + } + + // No change detected from read-only access + XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) + XCTAssertEqual(observer.willCommitCount, 0) + XCTAssertEqual(observer.didCommitCount, 0) + XCTAssertEqual(observer.didRollbackCount, 0) + XCTAssertEqual(observer.lastCommittedEvents.count, 0) + } + + func test_stopObservingDatabaseChangesUntilNextTransaction_from_databaseDidChange() throws { + class Observer: TransactionObserver { + var didChangeCount: Int = 0 + var didChangeWithEventCount: Int = 0 + var willCommitCount: Int = 0 + var didCommitCount: Int = 0 + var didRollbackCount: Int = 0 + + #if SQLITE_ENABLE_PREUPDATE_HOOK + var willChangeCount: Int = 0 + func databaseWillChange(with event: DatabasePreUpdateEvent) { willChangeCount += 1 } + #endif + + func observes(eventsOfKind eventKind: DatabaseEventKind) -> Bool { true } + + func 
databaseDidChange() { + didChangeCount += 1 + stopObservingDatabaseChangesUntilNextTransaction() + } + + func databaseDidChange(with event: DatabaseEvent) { + didChangeWithEventCount += 1 + } + + func databaseWillCommit() throws { willCommitCount += 1 } + func databaseDidCommit(_ db: Database) { didCommitCount += 1 } + func databaseDidRollback(_ db: Database) { didRollbackCount += 1 } + } + + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE test(a)") + } + + let observer = Observer() + dbQueue.add(transactionObserver: observer, extent: .databaseLifetime) + + try dbQueue.write { db in + // detected + try db.execute(sql: "INSERT INTO test (a) VALUES (1)") + try db.notifyChanges(in: .fullDatabase) + // ignored + try db.execute(sql: "INSERT INTO test (a) VALUES (2)") + } + + #if SQLITE_ENABLE_PREUPDATE_HOOK + XCTAssertEqual(observer.willChangeCount, 1) + #endif + XCTAssertEqual(observer.didChangeCount, 1) + XCTAssertEqual(observer.didChangeWithEventCount, 1) + XCTAssertEqual(observer.willCommitCount, 1) + XCTAssertEqual(observer.didCommitCount, 1) + XCTAssertEqual(observer.didRollbackCount, 0) + } + // MARK: - Read-Only Connection func testReadOnlyConnection() throws { @@ -1804,6 +2528,7 @@ class TransactionObserverTests: GRDBTestCase { COMMIT; """) XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1819,6 +2544,7 @@ class TransactionObserverTests: GRDBTestCase { COMMIT; """) XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1844,6 +2570,7 @@ class TransactionObserverTests: GRDBTestCase { COMMIT; """) XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) @@ -1859,6 +2586,7 @@ class TransactionObserverTests: GRDBTestCase { COMMIT; """) XCTAssertEqual(observer.didChangeCount, 0) + XCTAssertEqual(observer.didChangeWithEventCount, 0) XCTAssertEqual(observer.willCommitCount, 0) XCTAssertEqual(observer.didCommitCount, 0) XCTAssertEqual(observer.didRollbackCount, 0) diff --git a/Tests/GRDBTests/UpdateStatementTests.swift b/Tests/GRDBTests/UpdateStatementTests.swift index 0db27cf652..faf20f6763 100644 --- a/Tests/GRDBTests/UpdateStatementTests.swift +++ b/Tests/GRDBTests/UpdateStatementTests.swift @@ -52,7 +52,7 @@ class UpdateStatementTests : GRDBTestCase { try dbQueue.inTransaction { db in let statement = try db.makeStatement(sql: "INSERT INTO persons (name, age) VALUES (?, ?)") - let persons: [[DatabaseValueConvertible?]] = [ + let persons: [[(any DatabaseValueConvertible)?]] = [ ["Arthur", 41], ["Barbara", nil], ] @@ -79,7 +79,7 @@ class UpdateStatementTests : GRDBTestCase { try dbQueue.inTransaction { db in let statement = try db.makeStatement(sql: "INSERT INTO persons (name, age) VALUES (?, ?)") - let persons: [[DatabaseValueConvertible?]] = [ + let persons: [[(any DatabaseValueConvertible)?]] = [ ["Arthur", 41], ["Barbara", nil], ] @@ -107,7 +107,7 @@ class UpdateStatementTests : GRDBTestCase { try dbQueue.inTransaction { db in let statement = try db.makeStatement(sql: "INSERT INTO persons (name, age) VALUES 
(:name, :age)") - let persons: [[String: DatabaseValueConvertible?]] = [ + let persons: [[String: (any DatabaseValueConvertible)?]] = [ ["name": "Arthur", "age": 41], ["name": "Barbara", "age": nil], ] @@ -134,7 +134,7 @@ class UpdateStatementTests : GRDBTestCase { try dbQueue.inTransaction { db in let statement = try db.makeStatement(sql: "INSERT INTO persons (name, age) VALUES (:name, :age)") - let persons: [[String: DatabaseValueConvertible?]] = [ + let persons: [[String: (any DatabaseValueConvertible)?]] = [ ["name": "Arthur", "age": 41], ["name": "Barbara", "age": nil], ] @@ -388,7 +388,7 @@ class UpdateStatementTests : GRDBTestCase { XCTAssertEqual(error.description, """ SQLite error 21: Multiple statements found. To execute multiple statements, \ use Database.execute(sql:) or Database.allStatements(sql:) instead. \ - - while executing `UPDATE persons SET age = 1; UPDATE persons SET age = 2;` + - while executing `UPDATE persons SET age = 1; UPDATE persons SET age = 2` """) } } @@ -441,4 +441,155 @@ class UpdateStatementTests : GRDBTestCase { XCTAssertEqual(value, 3) } } + + // MARK: - SQLITE_STATIC vs SQLITE_TRANSIENT + + func test_SQLITE_STATIC_then_SQLITE_TRANSIENT() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.execute(sql: """ + CREATE TABLE t(a); + """) + + func test(value: some DatabaseValueConvertible) throws { + defer { try! db.execute(sql: "DELETE FROM t") } + + // Execute with temporary bindings (SQLITE_STATIC) + let statement = try db.makeStatement(sql: "INSERT INTO t VALUES (?)") + try statement.execute(arguments: [value]) + + // Execute with non temporary bindings (SQLITE_TRANSIENT) + try statement.execute() + + // Since bindings are not temporary, they are not cleared, + // so insert the value again. + sqlite3_reset(statement.sqliteStatement) + sqlite3_step(statement.sqliteStatement) + sqlite3_reset(statement.sqliteStatement) + + // Test that we have inserted the value thrice. + try XCTAssertEqual( + DatabaseValue.fetchSet(db, sql: "SELECT a FROM t"), + [value.databaseValue]) + } + + try test(value: "Foo") + try test(value: "") + try test(value: "Hello".data(using: .utf8)!) + try test(value: Data()) + try test(value: 42) + try test(value: 1.23) + } + } + + func test_SQLITE_STATIC() throws { + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + try db.execute(sql: """ + CREATE TABLE t(a); + """) + + func test(value: some DatabaseValueConvertible) throws { + defer { try! db.execute(sql: "DELETE FROM t") } + + // Execute with temporary bindings (SQLITE_STATIC) + let statement = try db.makeStatement(sql: "INSERT INTO t VALUES (?)") + try statement.execute(arguments: [value]) + + // Since bindings were temporary, and cleared, we now insert NULL + sqlite3_reset(statement.sqliteStatement) + sqlite3_step(statement.sqliteStatement) + sqlite3_reset(statement.sqliteStatement) + + // Test that we have inserted the value, and NULL + try XCTAssertEqual( + DatabaseValue.fetchSet(db, sql: "SELECT a FROM t"), + [value.databaseValue, .null]) + } + + try test(value: "Foo") + try test(value: "") + try test(value: "Hello".data(using: .utf8)!) + try test(value: Data()) + try test(value: 42) + try test(value: 1.23) + } + } + + func test_SQLITE_TRANSIENT_due_to_high_number_of_arguments() throws { + // SQLITE_STATIC optimization is disabled for more than 20 arguments. + let argumentCount = 21 + + let dbQueue = try makeDatabaseQueue() + try dbQueue.write { db in + // "a0, a1, a2, ..." + let columns = (0.. 
String { + private func region(sql: String, in dbReader: some DatabaseReader) throws -> String { try dbReader.read { db in try db .makeStatement(sql: sql) @@ -436,13 +436,17 @@ class ValueObservationPrintTests: GRDBTestCase { onChange: { _ in expectation.fulfill() }) withExtendedLifetime(cancellable) { waitForExpectations(timeout: 1, handler: nil) - XCTAssertEqual(logger.strings, [ - "start", - "fetch", - "value: nil", + // Order of events may not be stable, because the first + // "value: nil" is notified concurrently (from the reduce queue) + // with the first "database did change" (from the writer queue). + // That's why we test the sorted output. + XCTAssertEqual(logger.strings.sorted(), [ "database did change", "fetch", + "fetch", + "start", "tracked region: \(expectedRegion)", + "value: nil", "value: nil"]) } } diff --git a/Tests/GRDBTests/ValueObservationQueryInterfaceRequestTests.swift b/Tests/GRDBTests/ValueObservationQueryInterfaceRequestTests.swift index 30317ed65d..c11704e347 100644 --- a/Tests/GRDBTests/ValueObservationQueryInterfaceRequestTests.swift +++ b/Tests/GRDBTests/ValueObservationQueryInterfaceRequestTests.swift @@ -26,7 +26,7 @@ class ValueObservationQueryInterfaceRequestTests: GRDBTestCase { } try db.create(table: "child") { t in t.autoIncrementedPrimaryKey("id") - t.column("parentId", .integer).references("parent", onDelete: .cascade) + t.belongsTo("parent", onDelete: .cascade) t.column("name", .text) } } diff --git a/Tests/GRDBTests/ValueObservationRecorder.swift b/Tests/GRDBTests/ValueObservationRecorder.swift index 2b585a740e..fdc799f5a6 100644 --- a/Tests/GRDBTests/ValueObservationRecorder.swift +++ b/Tests/GRDBTests/ValueObservationRecorder.swift @@ -310,43 +310,43 @@ extension XCTestCase { /// - `[0, 1, 2]` (unexpected value) /// - `[1, 0, 1]` (unexpected value) func assertValueObservationRecordingMatch( - recorded recordedValues: R, - expected expectedValues: E, - allowMissingLastValue: Bool = false, + recorded: R, + expected: E, _ message: @autoclosure () -> String = "", file: StaticString = #file, line: UInt = #line) where - R: BidirectionalCollection, - E: BidirectionalCollection, + R: Collection, + E: Collection, R.Element == E.Element, R.Element: Equatable { - guard let value = expectedValues.last else { - if !recordedValues.isEmpty { - XCTFail("unexpected recorded prefix \(Array(recordedValues)) - \(message())", file: file, line: line) - } - return + XCTAssertTrue( + valueObservationRecordingMatch(recorded: recorded, expected: expected), + "Unexpected recording \(Array(recorded)) - \(message())", + file: file, line: line) + } + + func valueObservationRecordingMatch( + recorded: R, + expected: E) + -> Bool + where R: Collection, + E: Collection, + R.Element == E.Element, + R.Element: Equatable + { + guard let first = recorded.first else { + return expected.isEmpty } - let recordedSuffix = recordedValues.reversed().prefix(while: { $0 == value }) - let expectedSuffix = expectedValues.reversed().prefix(while: { $0 == value }) - if !allowMissingLastValue { - // Both missing and repeated values are allowed in the recorded values. - // This is because of asynchronous DatabasePool observations. 
- if recordedSuffix.isEmpty { - XCTFail("missing expected value \(value) - \(message()) in \(recordedValues)", file: file, line: line) + return expected.indices.lazy + .filter { expected[$0] == first } + .contains { + valueObservationRecordingMatch( + recorded: recorded.drop(while: { $0 == first }), + expected: expected[$0...].dropFirst()) } - } - - let remainingRecordedValues = recordedValues.prefix(recordedValues.count - recordedSuffix.count) - let remainingExpectedValues = expectedValues.prefix(expectedValues.count - expectedSuffix.count) - assertValueObservationRecordingMatch( - recorded: remainingRecordedValues, - expected: remainingExpectedValues, - // Other values can be missed - allowMissingLastValue: true, - message(), file: file, line: line) } } @@ -567,7 +567,7 @@ extension GRDBTestCase { func assertValueObservation( _ observation: ValueObservation, - fails testFailure: (Failure, DatabaseWriter) throws -> Void, + fails testFailure: (Failure, any DatabaseWriter) throws -> Void, setup: (Database) throws -> Void, file: StaticString = #file, line: UInt = #line) @@ -668,7 +668,7 @@ extension ValueObservationExpectations { consume(1) return next } - if let error = error { + if let error { throw error } else { throw ValueRecordingError.notEnoughValues @@ -696,7 +696,7 @@ extension ValueObservationExpectations { if remainingValues.isEmpty == false { return } - if let error = error { + if let error { throw error } } @@ -732,7 +732,7 @@ extension ValueObservationExpectations { consume(count) return Array(remainingValues.prefix(count)) } - if let error = error { + if let error { throw error } else { throw ValueRecordingError.notEnoughValues @@ -783,7 +783,7 @@ extension ValueObservationExpectations { consume(extraCount) return Array(values.prefix(matchedCount)) } - if let error = error { + if let error { throw error } consume(remainingValues.count) @@ -807,7 +807,7 @@ extension ValueObservationExpectations { public func get() throws -> (values: [Value], error: Error) { try recorder.value { (values, error, remainingValues, consume) in - if let error = error { + if let error { consume(remainingValues.count) return (values: values, error: error) } else { diff --git a/Tests/GRDBTests/ValueObservationRecorderTests.swift b/Tests/GRDBTests/ValueObservationRecorderTests.swift index 51e87afba9..4b8e8a2ae0 100644 --- a/Tests/GRDBTests/ValueObservationRecorderTests.swift +++ b/Tests/GRDBTests/ValueObservationRecorderTests.swift @@ -679,112 +679,105 @@ class ValueObservationRecorderTests: FailureTestCase { func testAssertValueObservationRecordingMatch() { do { let expected = [3] - assertValueObservationRecordingMatch(recorded: [3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3], expected: expected) - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [2]", "failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [2], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [2]") { - assertValueObservationRecordingMatch(recorded: [2, 3], expected: expected) - } + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2], expected: expected)) + 
XCTAssertFalse(valueObservationRecordingMatch(recorded: [2, 3], expected: expected)) } do { let expected = [2, 3] - assertValueObservationRecordingMatch(recorded: [3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [2, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [2, 2, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [2, 2, 3, 3], expected: expected) - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [], expected: expected) - } - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [2], expected: expected) - } - assertFailure("failed - missing expected value 3", "failed - unexpected recorded prefix [3]") { - assertValueObservationRecordingMatch(recorded: [3, 2], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [1]") { - assertValueObservationRecordingMatch(recorded: [1, 3], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [1]") { - assertValueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected) - } + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 2, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 2, 3, 3], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [3, 2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [1, 3], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected)) } do { let expected = [3, 3] - assertValueObservationRecordingMatch(recorded: [3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3, 3], expected: expected) - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [], expected: expected) - } - assertFailure("failed - missing expected value 3", "failed - unexpected recorded prefix [2]") { - assertValueObservationRecordingMatch(recorded: [2], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [2]") { - assertValueObservationRecordingMatch(recorded: [2, 3], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [2, 2]") { - assertValueObservationRecordingMatch(recorded: [2, 2, 3], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [1, 2]") { - assertValueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected) - } + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3, 3], expected: expected)) + + 
XCTAssertFalse(valueObservationRecordingMatch(recorded: [], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2, 3], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2, 2, 3], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected)) } do { let expected = [1, 2, 2, 3] - assertValueObservationRecordingMatch(recorded: [3], expected: expected) - assertValueObservationRecordingMatch(recorded: [3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 1, 3, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 2, 2, 3], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected) - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [], expected: expected) - } - assertFailure("failed - missing expected value 3") { - assertValueObservationRecordingMatch(recorded: [2], expected: expected) - } - assertFailure("failed - missing expected value 3", "failed - unexpected recorded prefix [3]") { - assertValueObservationRecordingMatch(recorded: [3, 2], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [2]") { - assertValueObservationRecordingMatch(recorded: [2, 1, 3], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [0]") { - assertValueObservationRecordingMatch(recorded: [0, 3], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [0]") { - assertValueObservationRecordingMatch(recorded: [0, 1, 2, 3], expected: expected) - } + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 1, 3, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 2, 3], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 3], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [3, 2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2, 1, 3], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [0, 3], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [0, 1, 2, 3], expected: expected)) } do { let expected = [1, 2, 1] - assertValueObservationRecordingMatch(recorded: [1], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 1], expected: expected) - assertValueObservationRecordingMatch(recorded: [2, 1], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 2, 1], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 1, 2, 1], expected: expected) - assertValueObservationRecordingMatch(recorded: [1, 2, 1, 1], expected: expected) - 
assertValueObservationRecordingMatch(recorded: [1, 2, 2, 1], expected: expected) - assertFailure("failed - missing expected value 1") { - assertValueObservationRecordingMatch(recorded: [], expected: expected) - } - assertFailure("failed - missing expected value 1") { - assertValueObservationRecordingMatch(recorded: [2], expected: expected) - } - assertFailure("failed - missing expected value 1") { - assertValueObservationRecordingMatch(recorded: [1, 2], expected: expected) - } - assertFailure("failed - unexpected recorded prefix [0]") { - assertValueObservationRecordingMatch(recorded: [0, 1], expected: expected) - } + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 1, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 1, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 2, 1], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [1, 2], expected: expected)) + XCTAssertFalse(valueObservationRecordingMatch(recorded: [0, 1], expected: expected)) + } + do { + let expected = [1, 2, 3, 2, 1] + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 3, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 3, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 3, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 1], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3, 1], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [3, 1, 2, 1], expected: expected)) + } + do { + let expected = [1, 2, 1, 3, 1, 4] + XCTAssertTrue(valueObservationRecordingMatch(recorded: [4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 3, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 2, 1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 1, 1, 4], expected: 
expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [1, 3, 1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [3, 1, 1, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 1, 3, 4], expected: expected)) + XCTAssertTrue(valueObservationRecordingMatch(recorded: [2, 3, 1, 4], expected: expected)) + + XCTAssertFalse(valueObservationRecordingMatch(recorded: [3, 2, 4], expected: expected)) } } } diff --git a/Tests/GRDBTests/ValueObservationTests.swift b/Tests/GRDBTests/ValueObservationTests.swift index 3a2a7d49ae..f5c1e49088 100644 --- a/Tests/GRDBTests/ValueObservationTests.swift +++ b/Tests/GRDBTests/ValueObservationTests.swift @@ -44,7 +44,7 @@ class ValueObservationTests: GRDBTestCase { throw error } } - + // Start observation var errorCaught = false let cancellable = observation.start( @@ -52,7 +52,7 @@ class ValueObservationTests: GRDBTestCase { onError: { _ in errorCaught = true notificationExpectation.fulfill() - }, + }, onChange: { XCTAssertFalse(errorCaught) nextError = TestError() @@ -61,7 +61,7 @@ class ValueObservationTests: GRDBTestCase { try! dbWriter.writeWithoutTransaction { db in try db.execute(sql: "INSERT INTO t DEFAULT VALUES") } - }) + }) withExtendedLifetime(cancellable) { waitForExpectations(timeout: 2, handler: nil) @@ -201,7 +201,7 @@ class ValueObservationTests: GRDBTestCase { onError: { error in XCTFail("Unexpected error: \(error)") }, onChange: { _ in expectation.fulfill() - }) + }) try withExtendedLifetime(cancellable) { try dbQueue.writeWithoutTransaction { db in try db.execute(sql: """ @@ -245,7 +245,7 @@ class ValueObservationTests: GRDBTestCase { onError: { error in XCTFail("Unexpected error: \(error)") }, onChange: { _ in expectation.fulfill() - }) + }) try withExtendedLifetime(cancellable) { try dbQueue.writeWithoutTransaction { db in try db.execute(sql: """ @@ -289,7 +289,7 @@ class ValueObservationTests: GRDBTestCase { onError: { error in XCTFail("Unexpected error: \(error)") }, onChange: { _ in expectation.fulfill() - }) + }) try withExtendedLifetime(cancellable) { try dbQueue.writeWithoutTransaction { db in try db.execute(sql: """ @@ -349,7 +349,7 @@ class ValueObservationTests: GRDBTestCase { onChange: { count in observedCounts.append(count) expectation.fulfill() - }) + }) withExtendedLifetime(cancellable) { waitForExpectations(timeout: 2, handler: nil) XCTAssertEqual(observedCounts, [0, 0]) @@ -389,7 +389,7 @@ class ValueObservationTests: GRDBTestCase { onChange: { count in observedCounts.append(count) expectation.fulfill() - }) + }) withExtendedLifetime(cancellable) { waitForExpectations(timeout: 2, handler: nil) XCTAssertEqual(observedCounts, [0, 0]) @@ -412,8 +412,8 @@ class ValueObservationTests: GRDBTestCase { DispatchQueue.main.asyncAfter(deadline: .now() + 1) { try! 
dbPool.write { db in try db.execute(sql: """ - INSERT INTO t DEFAULT VALUES; - """) + INSERT INTO t DEFAULT VALUES; + """) } } } @@ -523,6 +523,49 @@ class ValueObservationTests: GRDBTestCase { } #endif + // MARK: - Unspecified Changes + + func test_ValueObservation_is_triggered_by_explicit_change_notification() throws { + let dbQueue1 = try makeDatabaseQueue(filename: "test.sqlite") + try dbQueue1.write { db in + try db.execute(sql: "CREATE TABLE test(a)") + } + + let undetectedExpectation = expectation(description: "undetected") + undetectedExpectation.expectedFulfillmentCount = 2 // initial value and change + undetectedExpectation.isInverted = true + + let detectedExpectation = expectation(description: "detected") + detectedExpectation.expectedFulfillmentCount = 2 // initial value and change + + let observation = ValueObservation.tracking { db in + try Table("test").fetchCount(db) + } + let cancellable = observation.start( + in: dbQueue1, + scheduling: .immediate, + onError: { error in XCTFail("Unexpected error: \(error)") }, + onChange: { _ in + undetectedExpectation.fulfill() + detectedExpectation.fulfill() + }) + + try withExtendedLifetime(cancellable) { + // Change performed from external connection is not detected... + let dbQueue2 = try makeDatabaseQueue(filename: "test.sqlite") + try dbQueue2.write { db in + try db.execute(sql: "INSERT INTO test (a) VALUES (1)") + } + wait(for: [undetectedExpectation], timeout: 2) + + // ... until we perform an explicit change notification + try dbQueue1.write { db in + try db.notifyChanges(in: Table("test")) + } + wait(for: [detectedExpectation], timeout: 2) + } + } + // MARK: - Cancellation func testCancellableLifetime() throws { @@ -542,7 +585,7 @@ class ValueObservationTests: GRDBTestCase { } // Start observation and deallocate cancellable after second change - var cancellable: DatabaseCancellable? + var cancellable: (any DatabaseCancellable)? cancellable = observation.start( in: dbQueue, onError: { error in XCTFail("Unexpected error: \(error)") }, @@ -552,7 +595,7 @@ class ValueObservationTests: GRDBTestCase { cancellable = nil } notificationExpectation.fulfill() - }) + }) // notified try dbQueue.write { db in @@ -588,7 +631,7 @@ class ValueObservationTests: GRDBTestCase { } // Start observation and cancel cancellable after second change - var cancellable: DatabaseCancellable! + var cancellable: (any DatabaseCancellable)! cancellable = observation.start( in: dbQueue, onError: { error in XCTFail("Unexpected error: \(error)") }, @@ -598,7 +641,7 @@ class ValueObservationTests: GRDBTestCase { cancellable.cancel() } notificationExpectation.fulfill() - }) + }) try withExtendedLifetime(cancellable) { // notified @@ -620,13 +663,13 @@ class ValueObservationTests: GRDBTestCase { // Test that observation stops when cancellable is deallocated func test(_ dbWriter: some DatabaseWriter) throws { try dbWriter.write { try $0.execute(sql: "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT)") } - + let notificationExpectation = expectation(description: "notification") notificationExpectation.isInverted = true notificationExpectation.expectedFulfillmentCount = 2 - + do { - var cancellable: DatabaseCancellable? = nil + var cancellable: (any DatabaseCancellable)? 
= nil _ = cancellable // Avoid "Variable 'cancellable' was written to, but never read" warning var shouldStopObservation = false let observation = ValueObservation( @@ -649,13 +692,13 @@ class ValueObservationTests: GRDBTestCase { notificationExpectation.fulfill() }) } - + try dbWriter.write { db in try db.execute(sql: "INSERT INTO t DEFAULT VALUES") } waitForExpectations(timeout: 2, handler: nil) } - + try test(makeDatabaseQueue()) try test(makeDatabasePool()) } @@ -670,7 +713,7 @@ class ValueObservationTests: GRDBTestCase { notificationExpectation.expectedFulfillmentCount = 2 do { - var cancellable: DatabaseCancellable? = nil + var cancellable: (any DatabaseCancellable)? = nil _ = cancellable // Avoid "Variable 'cancellable' was written to, but never read" warning var shouldStopObservation = false let observation = ValueObservation( @@ -756,7 +799,7 @@ class ValueObservationTests: GRDBTestCase { // MARK: - Async Await - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_values_prefix() async throws { func test(_ writer: some DatabaseWriter) async throws { // We need something to change @@ -769,7 +812,7 @@ class ValueObservationTests: GRDBTestCase { .trackingConstantRegion(Table("t").fetchCount) .handleEvents(didCancel: { cancellationExpectation.fulfill() }) - for try await count in try observation.values(in: writer).prefix(while: { $0 < 3 }) { + for try await count in try observation.values(in: writer).prefix(while: { $0 <= 3 }) { counts.append(count) try await writer.write { try $0.execute(sql: "INSERT INTO t DEFAULT VALUES") } } @@ -777,12 +820,16 @@ class ValueObservationTests: GRDBTestCase { } let counts = try await task.value - - // All values were published - assertValueObservationRecordingMatch(recorded: counts, expected: [0, 1, 2]) + XCTAssertTrue(counts.contains(0)) + XCTAssertTrue(counts.contains(where: { $0 >= 2 })) + XCTAssertEqual(counts.sorted(), counts) // Observation was ended +#if compiler(>=5.8) + await fulfillment(of: [cancellationExpectation], timeout: 2) +#else wait(for: [cancellationExpectation], timeout: 2) +#endif } try await AsyncTest(test).run { try DatabaseQueue() } @@ -790,7 +837,7 @@ class ValueObservationTests: GRDBTestCase { try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabasePool(path: $0) } } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_values_prefix_immediate_scheduling() async throws { func test(_ writer: some DatabaseWriter) async throws { // We need something to change @@ -803,7 +850,7 @@ class ValueObservationTests: GRDBTestCase { .trackingConstantRegion(Table("t").fetchCount) .handleEvents(didCancel: { cancellationExpectation.fulfill() }) - for try await count in try observation.values(in: writer, scheduling: .immediate).prefix(while: { $0 < 3 }) { + for try await count in try observation.values(in: writer, scheduling: .immediate).prefix(while: { $0 <= 3 }) { counts.append(count) try await writer.write { try $0.execute(sql: "INSERT INTO t DEFAULT VALUES") } } @@ -811,12 +858,16 @@ class ValueObservationTests: GRDBTestCase { } let counts = try await task.value - - // All values were published - assertValueObservationRecordingMatch(recorded: counts, expected: [0, 1, 2]) + XCTAssertTrue(counts.contains(0)) + XCTAssertTrue(counts.contains(where: { $0 >= 2 })) + XCTAssertEqual(counts.sorted(), counts) // Observation was ended +#if compiler(>=5.8) + await fulfillment(of: 
[cancellationExpectation], timeout: 2) +#else wait(for: [cancellationExpectation], timeout: 2) +#endif } try await AsyncTest(test).run { try DatabaseQueue() } @@ -824,7 +875,7 @@ class ValueObservationTests: GRDBTestCase { try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabasePool(path: $0) } } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_values_break() async throws { func test(_ writer: some DatabaseWriter) async throws { // We need something to change @@ -839,7 +890,7 @@ class ValueObservationTests: GRDBTestCase { for try await count in observation.values(in: writer) { counts.append(count) - if count == 2 { + if count > 3 { break } else { try await writer.write { try $0.execute(sql: "INSERT INTO t DEFAULT VALUES") } @@ -849,12 +900,16 @@ class ValueObservationTests: GRDBTestCase { } let counts = try await task.value - - // All values were published - assertValueObservationRecordingMatch(recorded: counts, expected: [0, 1, 2]) + XCTAssertTrue(counts.contains(0)) + XCTAssertTrue(counts.contains(where: { $0 >= 2 })) + XCTAssertEqual(counts.sorted(), counts) // Observation was ended +#if compiler(>=5.8) + await fulfillment(of: [cancellationExpectation], timeout: 2) +#else wait(for: [cancellationExpectation], timeout: 2) +#endif } try await AsyncTest(test).run { try DatabaseQueue() } @@ -862,7 +917,7 @@ class ValueObservationTests: GRDBTestCase { try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabasePool(path: $0) } } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_values_immediate_break() async throws { func test(_ writer: some DatabaseWriter) async throws { // We need something to change @@ -889,7 +944,11 @@ class ValueObservationTests: GRDBTestCase { assertValueObservationRecordingMatch(recorded: counts, expected: [0]) // Observation was ended +#if compiler(>=5.8) + await fulfillment(of: [cancellationExpectation], timeout: 2) +#else wait(for: [cancellationExpectation], timeout: 2) +#endif } try await AsyncTest(test).run { try DatabaseQueue() } @@ -897,7 +956,7 @@ class ValueObservationTests: GRDBTestCase { try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabasePool(path: $0) } } - @available(macOS 10.15, iOS 13, tvOS 13, watchOS 6, *) + @available(iOS 13, macOS 10.15, tvOS 13, watchOS 6, *) func testAsyncAwait_values_cancelled() async throws { func test(_ writer: some DatabaseWriter) async throws { // We need something to change @@ -920,7 +979,7 @@ class ValueObservationTests: GRDBTestCase { Task { let observation = ValueObservation.trackingConstantRegion(Table("t").fetchCount) for try await count in observation.values(in: writer) { - if count == 3 { + if count >= 3 { cancelledTask.cancel() break } else { @@ -936,11 +995,306 @@ class ValueObservationTests: GRDBTestCase { XCTAssertEqual(cancelledValue, "cancelled loop") // Make sure observation was cancelled as well +#if compiler(>=5.8) + await fulfillment(of: [cancellationExpectation], timeout: 2) +#else wait(for: [cancellationExpectation], timeout: 2) +#endif } try await AsyncTest(test).run { try DatabaseQueue() } try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabaseQueue(path: $0) } try await AsyncTest(test).runAtTemporaryDatabasePath { try DatabasePool(path: $0) } } + + // An attempt at finding a regression test for + func testManyObservations() throws { + // We'll start many observations + let observationCount = 
100 + dbConfiguration.maximumReaderCount = 5 + + func test(_ writer: some DatabaseWriter, scheduling scheduler: some ValueObservationScheduler) throws { + try writer.write { + try $0.execute(sql: "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT)") + } + let observation = ValueObservation.tracking { + try Table("t").fetchCount($0) + } + + let initialValueExpectation = self.expectation(description: "") +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + initialValueExpectation.assertForOverFulfill = true +#else + // ValueObservation on DatabasePool will notify the first value twice + initialValueExpectation.assertForOverFulfill = false +#endif + initialValueExpectation.expectedFulfillmentCount = observationCount + + let secondValueExpectation = self.expectation(description: "") + secondValueExpectation.expectedFulfillmentCount = observationCount + + var cancellables: [AnyDatabaseCancellable] = [] + for _ in 0.. + func testManyObservationsWithLongConcurrentWrite() throws { + // We'll start many observations + let observationCount = 100 + dbConfiguration.maximumReaderCount = 5 + + func test(_ writer: some DatabaseWriter, scheduling scheduler: some ValueObservationScheduler) throws { + try writer.write { + try $0.execute(sql: "CREATE TABLE t(id INTEGER PRIMARY KEY AUTOINCREMENT)") + } + let observation = ValueObservation.tracking { + return try Table("t").fetchCount($0) + } + + let initialValueExpectation = self.expectation(description: "") +#if SQLITE_ENABLE_SNAPSHOT || (!GRDBCUSTOMSQLITE && !GRDBCIPHER && (compiler(>=5.7.1) || !(os(macOS) || targetEnvironment(macCatalyst)))) + initialValueExpectation.assertForOverFulfill = true +#else + // ValueObservation on DatabasePool will notify the first value twice + initialValueExpectation.assertForOverFulfill = false +#endif + initialValueExpectation.expectedFulfillmentCount = observationCount + + let secondValueExpectation = self.expectation(description: "") + secondValueExpectation.expectedFulfillmentCount = observationCount + + let semaphore = DispatchSemaphore(value: 0) + writer.asyncWriteWithoutTransaction { db in + semaphore.signal() + Thread.sleep(forTimeInterval: 0.5) + } + semaphore.wait() + + var cancellables: [AnyDatabaseCancellable] = [] + for _ in 0.. + func testIssue1362() throws { + func test(_ writer: some DatabaseWriter) throws { + try writer.write { try $0.execute(sql: "CREATE TABLE s(id INTEGER PRIMARY KEY AUTOINCREMENT)") } + var cancellables = [AnyDatabaseCancellable]() + + // Start an observation and wait until it has installed its + // transaction observer. + let installedExpectation = expectation(description: "transaction observer installed") + let finalExpectation = expectation(description: "final value") + let initialObservation = ValueObservation.trackingConstantRegion(Table("s").fetchCount) + let cancellable = initialObservation.start( + in: writer, + // Immediate initial value so that the next value comes + // from the write access that installs the transaction observer. + scheduling: .immediate, + onError: { error in XCTFail("Unexpected error: \(error)") }, + onChange: { count in + if count == 1 { + installedExpectation.fulfill() + } + if count == 2 { + finalExpectation.fulfill() + } + }) + cancellables.append(cancellable) + try writer.write { try $0.execute(sql: "INSERT INTO s DEFAULT VALUES") } // count = 1 + wait(for: [installedExpectation], timeout: 2) + + // Start a write that will trigger initialObservation when we decide. 
+ let semaphore = DispatchSemaphore(value: 0) + writer.asyncWriteWithoutTransaction { db in + semaphore.wait() + do { + try db.execute(sql: "INSERT INTO s DEFAULT VALUES") // count = 2 + } catch { + XCTFail("Unexpected error: \(error)") + } + } + + // Start as many observations as there are readers + for _ in 0.. + func testIssue1383() throws { + do { + let dbPool = try makeDatabasePool(filename: "test") + try dbPool.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE t(a)") + // Truncate the wal file (size zero) + try db.checkpoint(.truncate) + } + } + + do { + let dbPool = try makeDatabasePool(filename: "test") + let observation = ValueObservation.tracking(Table("t").fetchCount) + _ = observation.start( + in: dbPool, scheduling: .immediate, + onError: { error in + XCTFail("Unexpected error \(error)") + }, + onChange: { _ in + }) + } + } + + // Regression test for + func testIssue1383_async() throws { + do { + let dbPool = try makeDatabasePool(filename: "test") + try dbPool.writeWithoutTransaction { db in + try db.execute(sql: "CREATE TABLE t(a)") + // Truncate the wal file (size zero) + try db.checkpoint(.truncate) + } + } + + do { + let dbPool = try makeDatabasePool(filename: "test") + let observation = ValueObservation.tracking(Table("t").fetchCount) + let expectation = self.expectation(description: "completion") + expectation.assertForOverFulfill = false + let cancellable = observation.start( + in: dbPool, + onError: { error in + XCTFail("Unexpected error \(error)") + expectation.fulfill() + }, + onChange: { _ in + expectation.fulfill() + }) + withExtendedLifetime(cancellable) { _ in + wait(for: [expectation], timeout: 2) + } + } + } + + // Regression test for + func testIssue1383_createWal() throws { + let url = testBundle.url(forResource: "Issue1383", withExtension: "sqlite")! + // Delete files created by previous test runs + try? FileManager.default.removeItem(at: url.deletingLastPathComponent().appendingPathComponent("Issue1383.sqlite-wal")) + try? FileManager.default.removeItem(at: url.deletingLastPathComponent().appendingPathComponent("Issue1383.sqlite-shm")) + + let dbPool = try DatabasePool(path: url.path) + let observation = ValueObservation.tracking(Table("t").fetchCount) + _ = observation.start( + in: dbPool, scheduling: .immediate, + onError: { error in + XCTFail("Unexpected error \(error)") + }, + onChange: { _ in + }) + } + + // Regression test for + func testIssue1500() throws { + let pool = try makeDatabasePool() + + try pool.read { db in + _ = try db.tableExists("t") + } + + try pool.write { db in + try db.create(table: "t") { t in + t.column("a") + } + } + + _ = ValueObservation + .trackingConstantRegion { db in + try db.tableExists("t") + } + .start( + in: pool, + scheduling: .immediate, + onError: { error in + XCTFail("Unexpected error \(error)") + }, + onChange: { value in + XCTAssertEqual(value, true) + }) + } } diff --git a/Tests/Performance/GRDBPerformance/ArgumentsTests.swift b/Tests/Performance/GRDBPerformance/ArgumentsTests.swift new file mode 100644 index 0000000000..ec9ae88df2 --- /dev/null +++ b/Tests/Performance/GRDBPerformance/ArgumentsTests.swift @@ -0,0 +1,113 @@ +import XCTest +import GRDB + +class ArgumentsTests: XCTestCase { + static let shortString = "foo" + static let longString = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Phasellus consectetur felis eget nibh aliquet ullamcorper. Nam sodales, tellus a cursus tincidunt, arcu purus suscipit elit, nec congue erat ipsum a purus." + + var dbDirectoryPath: String! 
+ var dbQueue: DatabaseQueue! + + override func setUpWithError() throws { + let dbDirectoryName = "ArgumentsTests-\(ProcessInfo.processInfo.globallyUniqueString)" + dbDirectoryPath = (NSTemporaryDirectory() as NSString).appendingPathComponent(dbDirectoryName) + try FileManager.default.createDirectory(atPath: dbDirectoryPath, withIntermediateDirectories: true) + let dbPath = (dbDirectoryPath as NSString).appendingPathComponent("db.sqlite") + dbQueue = try DatabaseQueue(path: dbPath) + } + + override func tearDownWithError() throws { + dbQueue = nil + try FileManager.default.removeItem(atPath: dbDirectoryPath) + } + + func test_shortString_legacy_performance_() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.shortString] + measure { + for _ in 0..<1_000_000 { + // Simulate old implementation of statement.execute(arguments: arguments) + try! statement.setArguments(arguments) + try! statement.execute() + } + } + } + } + + func test_shortString_SQLITE_STATIC_performance() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.shortString] + measure { + for _ in 0..<1_000_000 { + try! statement.execute(arguments: arguments) + } + } + } + } + + func test_shortString_SQLITE_TRANSIENT_performance() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.shortString] + try statement.setArguments(arguments) + measure { + for _ in 0..<1_000_000 { + try! statement.execute() + } + } + } + } + + func test_longString_legacy_performance_() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.longString] + measure { + for _ in 0..<1_000_000 { + // Simulate old implementation of statement.execute(arguments: arguments) + try! statement.setArguments(arguments) + try! statement.execute() + } + } + } + } + + func test_longString_SQLITE_STATIC_performance() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.longString] + measure { + for _ in 0..<1_000_000 { + try! statement.execute(arguments: arguments) + } + } + } + } + + func test_longString_SQLITE_TRANSIENT_performance() throws { + try dbQueue.write { db in + try db.execute(sql: "CREATE TABLE t(a)") + + let statement = try db.makeStatement(sql: "INSERT INTO t(a) VALUES (?)") + let arguments: StatementArguments = [Self.longString] + try! statement.setArguments(arguments) + measure { + for _ in 0..<1_000_000 { + try! 
statement.execute() + } + } + } + } +} diff --git a/Tests/Performance/GRDBPerformance/FetchRecordOptimizedTests.swift b/Tests/Performance/GRDBPerformance/FetchRecordOptimizedTests.swift index 9921c19d8a..3195e3003b 100644 --- a/Tests/Performance/GRDBPerformance/FetchRecordOptimizedTests.swift +++ b/Tests/Performance/GRDBPerformance/FetchRecordOptimizedTests.swift @@ -29,7 +29,7 @@ private struct Item: Codable, FetchableRecord, PersistableRecord { i9 = row[9] } - static let databaseSelection: [SQLSelectable] = [ + static let databaseSelection: [any SQLSelectable] = [ Column("i0"), Column("i1"), Column("i2"), diff --git a/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.pbxproj b/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.pbxproj index d09feca28b..18c7372e0d 100755 --- a/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.pbxproj +++ b/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.pbxproj @@ -3,7 +3,7 @@ archiveVersion = 1; classes = { }; - objectVersion = 52; + objectVersion = 54; objects = { /* Begin PBXBuildFile section */ @@ -31,6 +31,7 @@ 5690AFDC212058CB001530EA /* FetchRecordDecodableTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 5690AFDA212058CB001530EA /* FetchRecordDecodableTests.swift */; }; 56A3FEA827A85F4800B0292E /* Realm in Frameworks */ = {isa = PBXBuildFile; productRef = 56A3FEA727A85F4800B0292E /* Realm */; }; 56A3FEAA27A85F4800B0292E /* RealmSwift in Frameworks */ = {isa = PBXBuildFile; productRef = 56A3FEA927A85F4800B0292E /* RealmSwift */; }; + 56A6F4AC29BBC8E200E22662 /* ArgumentsTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56A6F4AB29BBC8E200E22662 /* ArgumentsTests.swift */; }; 56B6D0E52618BF78003CC455 /* FetchRecordOptimizedTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6D0E42618BF78003CC455 /* FetchRecordOptimizedTests.swift */; }; 56B6D0E62618BF78003CC455 /* FetchRecordOptimizedTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6D0E42618BF78003CC455 /* FetchRecordOptimizedTests.swift */; }; 56B6D0EA2618C00C003CC455 /* InsertRecordOptimizedTests.swift in Sources */ = {isa = PBXBuildFile; fileRef = 56B6D0E92618C00C003CC455 /* InsertRecordOptimizedTests.swift */; }; @@ -97,6 +98,7 @@ 567986AD23A378CD0076902D /* GRDB.xcodeproj */ = {isa = PBXFileReference; lastKnownFileType = "wrapper.pb-project"; name = GRDB.xcodeproj; path = ../../../GRDB.xcodeproj; sourceTree = ""; }; 5690AFD72120589A001530EA /* InsertRecordEncodableTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = InsertRecordEncodableTests.swift; sourceTree = ""; }; 5690AFDA212058CB001530EA /* FetchRecordDecodableTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FetchRecordDecodableTests.swift; sourceTree = ""; }; + 56A6F4AB29BBC8E200E22662 /* ArgumentsTests.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ArgumentsTests.swift; sourceTree = ""; }; 56B6D0E42618BF78003CC455 /* FetchRecordOptimizedTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FetchRecordOptimizedTests.swift; sourceTree = ""; }; 56B6D0E92618C00C003CC455 /* InsertRecordOptimizedTests.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = InsertRecordOptimizedTests.swift; sourceTree = ""; }; 56BB86121BA9886D001F9168 /* InsertRecordClassTests.swift */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = InsertRecordClassTests.swift; sourceTree = ""; }; @@ -143,7 +145,7 @@ isa = PBXGroup; children = ( 567986B923A378CD0076902D /* GRDB.framework */, - 567986BB23A378CD0076902D /* GRDBOSXTests.xctest */, + 567986BB23A378CD0076902D /* GRDBTests.xctest */, ); name = Products; sourceTree = ""; @@ -160,6 +162,7 @@ children = ( 56BB862D1BA98933001F9168 /* GRDBPerformanceComparisonTests-Bridging.h */, 56DE7B2D1C42B23B00861EB8 /* PerformanceModel.xcdatamodeld */, + 56A6F4AB29BBC8E200E22662 /* ArgumentsTests.swift */, 567071FA208A509C006AD95A /* DateParsingTests.swift */, 56DE7B271C41302500861EB8 /* FetchNamedValuesTests.swift */, 56DE7B291C4130AF00861EB8 /* FetchPositionalValuesTests.swift */, @@ -307,7 +310,7 @@ remoteRef = 567986B823A378CD0076902D /* PBXContainerItemProxy */; sourceTree = BUILT_PRODUCTS_DIR; }; - 567986BB23A378CD0076902D /* GRDBOSXTests.xctest */ = { + 567986BB23A378CD0076902D /* GRDBTests.xctest */ = { isa = PBXReferenceProxy; fileType = wrapper.cfbundle; path = GRDBTests.xctest; @@ -346,6 +349,7 @@ 56DE7B281C41302500861EB8 /* FetchNamedValuesTests.swift in Sources */, 5690AFD82120589A001530EA /* InsertRecordEncodableTests.swift in Sources */, 56B6D0EA2618C00C003CC455 /* InsertRecordOptimizedTests.swift in Sources */, + 56A6F4AC29BBC8E200E22662 /* ArgumentsTests.swift in Sources */, 5690AFDB212058CB001530EA /* FetchRecordDecodableTests.swift in Sources */, 56D507831F6D7B2E00AE1C5B /* InsertRecordStructTests.swift in Sources */, 56DE7B241C412F7E00861EB8 /* InsertPositionalValuesTests.swift in Sources */, diff --git a/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved b/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved index 871798b7ea..65281ac4e5 100644 --- a/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved +++ b/Tests/Performance/GRDBPerformance/GRDBPerformance.xcodeproj/project.xcworkspace/xcshareddata/swiftpm/Package.resolved @@ -1,21 +1,22 @@ { + "originHash" : "8d182a38fc35b0d50c198e0cbd39dc7fa5922eae5336ff3b0c73c2d51bcba752", "pins" : [ { "identity" : "fmdb", "kind" : "remoteSourceControl", "location" : "https://github.com/ccgus/fmdb.git", "state" : { - "revision" : "61e51fde7f7aab6554f30ab061cc588b28a97d04", - "version" : "2.7.7" + "revision" : "47a2fa12a242b5a2fe13b916c22f2212e426055c", + "version" : "2.7.9" } }, { "identity" : "realm-core", "kind" : "remoteSourceControl", - "location" : "https://github.com/realm/realm-core", + "location" : "https://github.com/realm/realm-core.git", "state" : { - "revision" : "3c4b86da569368c4ee5ccca2a112d511cc6fa936", - "version" : "12.11.0" + "revision" : "374dd672af357732dccc135fecc905406fec3223", + "version" : "14.4.1" } }, { @@ -23,8 +24,8 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/realm/realm-swift.git", "state" : { - "revision" : "dc1672b3011eb992ce95db4feec83cd897626a16", - "version" : "10.32.1" + "revision" : "e0c2fbb442979fbf1e4be80e01d142f310a9c762", + "version" : "10.49.1" } }, { @@ -32,10 +33,10 @@ "kind" : "remoteSourceControl", "location" : "https://github.com/stephencelis/SQLite.swift.git", "state" : { - "revision" : "4d543d811ee644fa4cc4bfa0be996b4dd6ba0f54", - "version" : "0.13.3" + "revision" : "e78ae0220e17525a15ac68c697a155eb7a672a8e", + "version" : "0.15.0" } } ], - "version" : 2 + "version" : 3 } diff --git 
a/Tests/generatePerformanceReport.rb b/Tests/generatePerformanceReport.rb index ec8ae52867..2af6a9d45a 100755 --- a/Tests/generatePerformanceReport.rb +++ b/Tests/generatePerformanceReport.rb @@ -40,7 +40,7 @@ def formatted_samples(samples, test) # DERIVED_DATA tmp = BUILD_ROOT -while !File.exists?(File.join(tmp, 'SourcePackages')) +while !File.exist?(File.join(tmp, 'SourcePackages')) parent = File.dirname(tmp) exit 1 if tmp == parent tmp = parent @@ -54,7 +54,7 @@ def formatted_samples(samples, test) GRDB_VERSION = info_plist_version('Support/Info.plist') FMDB_VERSION = info_plist_version("#{SPM_CHECKOUTS}/fmdb/src/fmdb/Info.plist") SQLITE_SWIFT_VERSION = git_tag_version("#{SPM_CHECKOUTS}/SQLite.swift") -REALM_VERSION = git_tag_version("#{SPM_CHECKOUTS}/realm-cocoa") +REALM_VERSION = git_tag_version("#{SPM_CHECKOUTS}/realm-swift") `xcodebuild -version` =~ /Xcode (.*)$/ XCODE_VERSION = $1 diff --git a/assets/README.md.in b/assets/README.md.in new file mode 100644 index 0000000000..6495ec4784 --- /dev/null +++ b/assets/README.md.in @@ -0,0 +1,6479 @@ +# GRDB + SQLCipher + +## What is this? +This is a fork of [GRDB](https://github.com/groue/GRDB.swift) which contains a [SQLCipher Community Edition](https://www.zetetic.net/sqlcipher/open-source/) amalgamation packaged so that it can be consumed as a Swift Package. + +The default branch for this repository is `SQLCipher` so that we can more easily pull upstream changes if we need to. + +## Versioning + +* This Package: *Session-${new_version}* +* GRDB: *${upstream_version}* +* SQLCipher: *${sqlcipher_version}* + +## Contributions +We do not accept contributions to this repository at this time. However, feel free to open an issue in order to start a discussion. + +## Updating from Upstream + +Add remote upstream: + +* `git remote add upstream git@github.com:groue/GRDB.swift.git` + +Check out upstream's master branch locally: + +* `git fetch upstream +master:upstream-master && git checkout upstream-master` + +Update upstream's master branch if needed: + +* `git pull upstream master` + +Switch back to the `SQLCipher` branch and merge with upstream-master: + +* `git merge upstream-master` + +Resolve any conflicts that may occur (normally there should be none or only in Package.swift) +and commit the merge. Once done, run `prepare_release.sh` script to fetch and compile the latest tag +of SQLCipher and embed it in GRDB.swift: + +* `./prepare_release.sh` + +The script will also: +* present the summary of updated versions and ask you to pick the new version number for DuckDuckGo GRDB fork, +* test the build, +* create a new release branch and commit changes. + +For versioning, follow [Semantic Versioning Rules](https://semver.org), but note you don't need +to use the same version as GRDB. Examples: + +* Upstream GRDB 5.6.0, after merge -> 5.12.0 + * This project 1.0.0 -> 1.1.0 + +* Upstream GRDB 5.12.0, after merge -> 6.0.0 + * This project 1.1.0 -> 2.0.0 + +If everything looks fine: +* push your branch, +* create PR for BSK referencing the new branch, +* create PRs for iOS and macOS apps referencing your BSK branch. + +Once approved: +* merge your branch back to `SQLCipher`, +* create a tag matching the release number **without the 'v' prefix** (those are reserved for upstream), +* push the tag, +* update the reference to GRDB in BSK to point to a tag. + +### Compiling SQLCipher manually + +In case `prepare_release.sh` script fails, you need to compile SQLCipher amalgamation package +manually. 
See [general instructions](https://github.com/sqlcipher/sqlcipher#compiling-for-unix-like-systems):

* Use `./configure --with-crypto-lib=none`.
* Remember to use `make sqlite3.c` and not `make`.
* Copy `sqlite3.c` and `sqlite3.h` to `Sources/SQLCipher/sqlite3.c` and `Sources/SQLCipher/include/sqlite3.h`.

---

GRDB: A toolkit for SQLite databases, with a focus on application development. Proudly serving the community since 2015.

**Latest release**: April 21, 2024 • [version 6.27.0](https://github.com/groue/GRDB.swift/tree/v6.27.0) • [CHANGELOG](CHANGELOG.md) • [Migrating From GRDB 5 to GRDB 6](Documentation/GRDB6MigrationGuide.md)

**Requirements**: iOS 11.0+ / macOS 10.13+ / tvOS 11.0+ / watchOS 4.0+ • SQLite 3.19.3+ • Swift 5.7+ / Xcode 14+

**Contact**:

- Release announcements and usage tips: follow [@groue](http://twitter.com/groue) on Twitter, [@groue@hachyderm.io](https://hachyderm.io/@groue) on Mastodon.
- Report bugs in a [Github issue](https://github.com/groue/GRDB.swift/issues/new). Make sure you check the [existing issues](https://github.com/groue/GRDB.swift/issues?q=is%3Aopen) first.
- A question? Looking for advice? Do you wonder how to contribute? Fancy a chat? Go to the [GitHub discussions](https://github.com/groue/GRDB.swift/discussions), or the [GRDB forums](https://forums.swift.org/c/related-projects/grdb).


## What is GRDB?

Use this library to save your application’s permanent data into SQLite databases. It comes with built-in tools that address common needs:

- **SQL Generation**

    Enhance your application models with persistence and fetching methods, so that you don't have to deal with SQL and raw database rows when you don't want to.

- **Database Observation**

    Get notifications when database values are modified.

- **Robust Concurrency**

    Multi-threaded applications can efficiently use their databases, including WAL databases that support concurrent reads and writes.

- **Migrations**

    Evolve the schema of your database as you ship new versions of your application.

- **Leverage your SQLite skills**

    Not all developers need advanced SQLite features. But when you do, GRDB is as sharp as you want it to be. Come with your SQL and SQLite skills, or learn new ones as you go!

---

[Usage](#usage) • [Documentation](#documentation) • [Installation](#installation) • [FAQ]

---

## Usage
**Start using the database in four steps:**

```swift
import GRDB

// 1. Open a database connection
let dbQueue = try DatabaseQueue(path: "/path/to/database.sqlite")

// 2. Define the database schema
try dbQueue.write { db in
    try db.create(table: "player") { t in
        t.primaryKey("id", .text)
        t.column("name", .text).notNull()
        t.column("score", .integer).notNull()
    }
}

// 3. Define a record type
struct Player: Codable, FetchableRecord, PersistableRecord {
    var id: String
    var name: String
    var score: Int
}

// 4. Write and read in the database
try dbQueue.write { db in
    try Player(id: "1", name: "Arthur", score: 100).insert(db)
    try Player(id: "2", name: "Barbara", score: 1000).insert(db)
}

let players: [Player] = try dbQueue.read { db in
    try Player.fetchAll(db)
}
```

**Access to raw SQL:**

```swift
try dbQueue.write { db in
    try db.execute(sql: """
        CREATE TABLE place (
          id INTEGER PRIMARY KEY AUTOINCREMENT,
          title TEXT NOT NULL,
          favorite BOOLEAN NOT NULL DEFAULT 0,
          latitude DOUBLE NOT NULL,
          longitude DOUBLE NOT NULL)
        """)

    try db.execute(sql: """
        INSERT INTO place (title, favorite, latitude, longitude)
        VALUES (?, ?, ?, ?)
        """, arguments: ["Paris", true, 48.85341, 2.3488])

    let parisId = db.lastInsertedRowID

    // Avoid SQL injection with SQL interpolation
    try db.execute(literal: """
        INSERT INTO place (title, favorite, latitude, longitude)
        VALUES (\("King's Cross"), \(true), \(51.52151), \(-0.12763))
        """)
}
```

See [Executing Updates](#executing-updates)

**Access to raw database rows and values:**

```swift
try dbQueue.read { db in
    // Fetch database rows
    let rows = try Row.fetchCursor(db, sql: "SELECT * FROM place")
    while let row = try rows.next() {
        let title: String = row["title"]
        let isFavorite: Bool = row["favorite"]
        let coordinate = CLLocationCoordinate2D(
            latitude: row["latitude"],
            longitude: row["longitude"])
    }

    // Fetch values
    let placeCount = try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM place")! // Int
    let placeTitles = try String.fetchAll(db, sql: "SELECT title FROM place") // [String]
}

let placeCount = try dbQueue.read { db in
    try Int.fetchOne(db, sql: "SELECT COUNT(*) FROM place")!
}
```

See [Fetch Queries](#fetch-queries)

**Database model types aka "records":**

```swift
struct Place {
    var id: Int64?
    var title: String
    var isFavorite: Bool
    var coordinate: CLLocationCoordinate2D
}

// snip: turn Place into a "record" by adopting the protocols that
// provide fetching and persistence methods.

try dbQueue.write { db in
    // Create database table
    try db.create(table: "place") { t in
        t.autoIncrementedPrimaryKey("id")
        t.column("title", .text).notNull()
        t.column("favorite", .boolean).notNull().defaults(to: false)
        t.column("longitude", .double).notNull()
        t.column("latitude", .double).notNull()
    }

    var berlin = Place(
        id: nil,
        title: "Berlin",
        isFavorite: false,
        coordinate: CLLocationCoordinate2D(latitude: 52.52437, longitude: 13.41053))

    try berlin.insert(db)
    berlin.id // some value

    berlin.isFavorite = true
    try berlin.update(db)
}
```

See [Records](#records)

**Query the database with the Swift query interface:**

```swift
try dbQueue.read { db in
    // Place
    let paris = try Place.find(db, id: 1)

    // Place?
    let berlin = try Place.filter(Column("title") == "Berlin").fetchOne(db)

    // [Place]
    let favoritePlaces = try Place
        .filter(Column("favorite") == true)
        .order(Column("title"))
        .fetchAll(db)

    // Int
    let favoriteCount = try Place.filter(Column("favorite")).fetchCount(db)

    // SQL is always welcome
    let places = try Place.fetchAll(db, sql: "SELECT * FROM place")
}
```

See the [Query Interface](#the-query-interface)

**Database changes notifications:**

```swift
// Define the observed value
let observation = ValueObservation.tracking { db in
    try Place.fetchAll(db)
}

// Start observation
let cancellable = observation.start(
    in: dbQueue,
    onError: { error in ... },
    onChange: { (places: [Place]) in print("Fresh places: \(places)") })
```

Ready-made support for Combine and RxSwift:

```swift
// Combine
let cancellable = observation.publisher(in: dbQueue).sink(
    receiveCompletion: { completion in ... },
    receiveValue: { (places: [Place]) in print("Fresh places: \(places)") })

// RxSwift
let disposable = observation.rx.observe(in: dbQueue).subscribe(
    onNext: { (places: [Place]) in print("Fresh places: \(places)") },
    onError: { error in ... })
```

See [Database Observation], [Combine Support], [RxGRDB].
Documentation
=============

**GRDB runs on top of SQLite**: you should get familiar with the [SQLite FAQ](http://www.sqlite.org/faq.html). For general and detailed information, jump to the [SQLite Documentation](http://www.sqlite.org/docs.html).


#### Demo Applications & Frequently Asked Questions

- [Demo Applications]: Three flavors: vanilla UIKit, Combine + SwiftUI, and Async/Await + SwiftUI.
- [FAQ]

#### Reference

- 📖 [GRDB Reference](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/)

#### Getting Started

- [Installation](#installation)
- [Database Connections]: Connect to SQLite databases

#### SQLite and SQL

- [SQLite API](#sqlite-api): The low-level SQLite API • [executing updates](#executing-updates) • [fetch queries](#fetch-queries) • [SQL Interpolation]

#### Records and the Query Interface

- [Records](#records): Fetching and persistence methods for your custom structs and class hierarchies
- [Query Interface](#the-query-interface): A swift way to generate SQL • [create tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema) • [requests](#requests) • [associations between record types](Documentation/AssociationsBasics.md)

#### Application Tools

- [Migrations]: Transform your database as your application evolves.
- [Full-Text Search]: Perform efficient and customizable full-text searches.
- [Database Observation]: Observe database changes and transactions.
- [Encryption](#encryption): Encrypt your database with SQLCipher.
- [Backup](#backup): Dump the content of a database to another.
- [Interrupt a Database](#interrupt-a-database): Abort any pending database operation.
- [Sharing a Database]: How to share an SQLite database between multiple processes - recommendations for App Group containers, App Extensions, App Sandbox, and file coordination.

#### Good to Know

- [Concurrency]: How to access databases in a multi-threaded application.
- [Combine](Documentation/Combine.md): Access and observe the database with Combine publishers.
- [Avoiding SQL Injection](#avoiding-sql-injection)
- [Error Handling](#error-handling)
- [Unicode](#unicode)
- [Memory Management](#memory-management)
- [Data Protection](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections)
- :bulb: [Migrating From GRDB 5 to GRDB 6](Documentation/GRDB6MigrationGuide.md)
- :bulb: [Why Adopt GRDB?](Documentation/WhyAdoptGRDB.md)
- :bulb: [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices)

#### Companion Libraries

- [GRDBQuery](https://github.com/groue/GRDBQuery): Access and observe the database from your SwiftUI views.
- [GRDBSnapshotTesting](https://github.com/groue/GRDBSnapshotTesting): Test your database.

**[FAQ]**

**[Sample Code](#sample-code)**


Installation
============

**The installation procedures below have GRDB use the version of SQLite that ships with the target operating system.**

See [Encryption](#encryption) for the installation procedure of GRDB with SQLCipher.

See [Custom SQLite builds](Documentation/CustomSQLiteBuilds.md) for the installation procedure of GRDB with a customized build of SQLite.


## CocoaPods

[CocoaPods](http://cocoapods.org/) is a dependency manager for Xcode projects. To use GRDB with CocoaPods (version 1.2 or higher), specify in your `Podfile`:

```ruby
pod 'GRDB.swift'
```

GRDB can be installed as a framework, or a static library.

## Swift Package Manager

The [Swift Package Manager](https://swift.org/package-manager/) automates the distribution of Swift code. To use GRDB with SPM, add a dependency to `https://github.com/groue/GRDB.swift.git`.

GRDB offers two libraries, `GRDB` and `GRDB-dynamic`. Pick only one. When in doubt, prefer `GRDB`. The `GRDB-dynamic` library can prove useful if you are going to link it with multiple targets within your app and only wish to link to a shared, dynamic framework once. See [How to link a Swift Package as dynamic](https://forums.swift.org/t/how-to-link-a-swift-package-as-dynamic/32062) for more information.

> **Note**: Linux is not currently supported.
>
> **Warning**: Due to an Xcode bug, you will get "No such module 'CSQLite'" errors when you want to embed the GRDB package in other targets than the main application (watch extensions, for example). UI and Unit testing targets are OK, though. See [#642](https://github.com/groue/GRDB.swift/issues/642#issuecomment-575994093) for more information.

## Carthage

[Carthage](https://github.com/Carthage/Carthage) is **unsupported**. For some context about this decision, see [#433](https://github.com/groue/GRDB.swift/issues/433).


## Manually

1. [Download](https://github.com/groue/GRDB.swift/releases) a copy of GRDB, or clone its repository and make sure you check out the latest tagged version.

2. Embed the `GRDB.xcodeproj` project in your own project.

3. Add the `GRDB` target in the **Target Dependencies** section of the **Build Phases** tab of your application target (extension target for WatchOS).

4. Add the `GRDB.framework` to the **Embedded Binaries** section of the **General** tab of your application target (extension target for WatchOS).

> :bulb: **Tip**: see the [Demo Applications] for examples of such integration.


Database Connections
====================

GRDB provides two classes for accessing SQLite databases: [`DatabaseQueue`] and [`DatabasePool`]:

```swift
import GRDB

// Pick one:
let dbQueue = try DatabaseQueue(path: "/path/to/database.sqlite")
let dbPool = try DatabasePool(path: "/path/to/database.sqlite")
```

The differences are:

- Database pools allow concurrent database accesses (this can improve the performance of multithreaded applications).
- Database pools open your SQLite database in the [WAL mode](https://www.sqlite.org/wal.html) (unless read-only).
- Database queues support [in-memory databases](https://www.sqlite.org/inmemorydb.html).

**If you are not sure, choose [`DatabaseQueue`].** You will always be able to switch to [`DatabasePool`] later.

For more information and tips when opening connections, see [Database Connections](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections).


SQLite API
==========

**In this section of the documentation, we will talk SQL.** Jump to the [query interface](#the-query-interface) if SQL is not your cup of tea.
- [Executing Updates](#executing-updates)
- [Fetch Queries](#fetch-queries)
    - [Fetching Methods](#fetching-methods)
    - [Row Queries](#row-queries)
    - [Value Queries](#value-queries)
- [Values](#values)
    - [Data](#data-and-memory-savings)
    - [Date and DateComponents](#date-and-datecomponents)
    - [NSNumber, NSDecimalNumber, and Decimal](#nsnumber-nsdecimalnumber-and-decimal)
    - [Swift enums](#swift-enums)
    - [`DatabaseValueConvertible`]: the protocol for custom value types
- [Transactions and Savepoints]
- [SQL Interpolation]

Advanced topics:

- [Prepared Statements]
- [Custom SQL Functions and Aggregates](#custom-sql-functions-and-aggregates)
- [Database Schema Introspection](#database-schema-introspection)
- [Row Adapters](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter)
- [Raw SQLite Pointers](#raw-sqlite-pointers)


## Executing Updates

Once granted with a [database connection], the [`execute(sql:arguments:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/execute(sql:arguments:)) method executes the SQL statements that do not return any database row, such as `CREATE TABLE`, `INSERT`, `DELETE`, `ALTER`, etc.

For example:

```swift
try dbQueue.write { db in
    try db.execute(sql: """
        CREATE TABLE player (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT NOT NULL,
            score INT)
        """)

    try db.execute(
        sql: "INSERT INTO player (name, score) VALUES (?, ?)",
        arguments: ["Barbara", 1000])

    try db.execute(
        sql: "UPDATE player SET score = :score WHERE id = :id",
        arguments: ["score": 1000, "id": 1])
}
```

The `?` and colon-prefixed keys like `:score` in the SQL query are the **statement arguments**. You pass arguments with arrays or dictionaries, as in the example above. See [Values](#values) for more information on supported argument types (Bool, Int, String, Date, Swift enums, etc.), and [`StatementArguments`] for a detailed documentation of SQLite arguments.

You can also embed query arguments right into your SQL queries, with [`execute(literal:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/execute(literal:)), as in the example below. See [SQL Interpolation] for more details.

```swift
try dbQueue.write { db in
    let name = "O'Brien"
    let score = 550
    try db.execute(literal: """
        INSERT INTO player (name, score) VALUES (\(name), \(score))
        """)
}
```

**Never ever embed values directly in your raw SQL strings**. See [Avoiding SQL Injection](#avoiding-sql-injection) for more information:

```swift
// WRONG: don't embed values in raw SQL strings
let id = 123
let name = textField.text
try db.execute(
    sql: "UPDATE player SET name = '\(name)' WHERE id = \(id)")

// CORRECT: use arguments dictionary
try db.execute(
    sql: "UPDATE player SET name = :name WHERE id = :id",
    arguments: ["name": name, "id": id])

// CORRECT: use arguments array
try db.execute(
    sql: "UPDATE player SET name = ? WHERE id = ?",
    arguments: [name, id])

// CORRECT: use SQL Interpolation
try db.execute(
    literal: "UPDATE player SET name = \(name) WHERE id = \(id)")
```

**Join multiple statements with a semicolon**:

```swift
try db.execute(sql: """
    INSERT INTO player (name, score) VALUES (?, ?);
    INSERT INTO player (name, score) VALUES (?, ?);
    """, arguments: ["Arthur", 750, "Barbara", 1000])

try db.execute(literal: """
    INSERT INTO player (name, score) VALUES (\("Arthur"), \(750));
    INSERT INTO player (name, score) VALUES (\("Barbara"), \(1000));
    """)
```

When you want to make sure that a single statement is executed, use a prepared [`Statement`].

**After an INSERT statement**, you can get the row ID of the inserted row with [`lastInsertedRowID`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/lastinsertedrowid):

```swift
try db.execute(
    sql: "INSERT INTO player (name, score) VALUES (?, ?)",
    arguments: ["Arthur", 1000])
let playerId = db.lastInsertedRowID
```

Don't miss [Records](#records), which provide classic **persistence methods**:

```swift
var player = Player(name: "Arthur", score: 1000)
try player.insert(db)
let playerId = player.id
```


## Fetch Queries

[Database connections] let you fetch database rows, plain values, and custom models aka "records".

**Rows** are the raw results of SQL queries:

```swift
try dbQueue.read { db in
    if let row = try Row.fetchOne(db, sql: "SELECT * FROM wine WHERE id = ?", arguments: [1]) {
        let name: String = row["name"]
        let color: Color = row["color"]
        print(name, color)
    }
}
```

**Values** are the Bool, Int, String, Date, Swift enums, etc. stored in row columns:

```swift
try dbQueue.read { db in
    let urls = try URL.fetchCursor(db, sql: "SELECT url FROM wine")
    while let url = try urls.next() {
        print(url)
    }
}
```

**Records** are your application objects that can initialize themselves from rows:

```swift
let wines = try dbQueue.read { db in
    try Wine.fetchAll(db, sql: "SELECT * FROM wine")
}
```

- [Fetching Methods](#fetching-methods) and [Cursors](#cursors)
- [Row Queries](#row-queries)
- [Value Queries](#value-queries)
- [Records](#records)


### Fetching Methods

**Throughout GRDB**, you can always fetch *cursors*, *arrays*, *sets*, or *single values* of any fetchable type (database [row](#row-queries), simple [value](#value-queries), or custom [record](#records)):

```swift
try Row.fetchCursor(...) // A Cursor of Row
try Row.fetchAll(...)    // [Row]
try Row.fetchSet(...)    // Set<Row>
try Row.fetchOne(...)    // Row?
```

- `fetchCursor` returns a **[cursor](#cursors)** over fetched values:

    ```swift
    let rows = try Row.fetchCursor(db, sql: "SELECT ...") // A Cursor of Row
    ```

- `fetchAll` returns an **array**:

    ```swift
    let players = try Player.fetchAll(db, sql: "SELECT ...") // [Player]
    ```

- `fetchSet` returns a **set**:

    ```swift
    let names = try String.fetchSet(db, sql: "SELECT ...") // Set<String>
    ```

- `fetchOne` returns a **single optional value**, and consumes a single database row (if any).

    ```swift
    let count = try Int.fetchOne(db, sql: "SELECT COUNT(*) ...") // Int?
    ```

**All those fetching methods require an SQL string that contains a single SQL statement.** When you want to fetch from multiple statements joined with a semicolon, iterate the multiple [prepared statements] found in the SQL string.
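For instance, here is a minimal sketch of such an iteration, assuming the `Database.allStatements(sql:)` method described in [Prepared Statements], and the `player` table from the examples above:

```swift
try dbQueue.write { db in
    // Iterate the prepared statements found in the SQL string,
    // and execute each of them in turn.
    let statements = try db.allStatements(sql: """
        INSERT INTO player (name, score) VALUES ('Arthur', 750);
        INSERT INTO player (name, score) VALUES ('Barbara', 1000);
        """)
    while let statement = try statements.next() {
        try statement.execute()
    }
}
```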
### Cursors

📖 [`Cursor`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/cursor)

**Whenever you consume several rows from the database, you can fetch an Array, a Set, or a Cursor**.

The `fetchAll()` and `fetchSet()` methods return regular Swift arrays and sets, that you iterate like all other arrays and sets:

```swift
try dbQueue.read { db in
    // [Player]
    let players = try Player.fetchAll(db, sql: "SELECT ...")
    for player in players {
        // use player
    }
}
```

Unlike arrays and sets, cursors returned by `fetchCursor()` load their results step after step:

```swift
try dbQueue.read { db in
    // Cursor of Player
    let players = try Player.fetchCursor(db, sql: "SELECT ...")
    while let player = try players.next() {
        // use player
    }
}
```

- **Cursors cannot be used on any thread**: you must consume a cursor on the dispatch queue it was created in. Particularly, don't extract a cursor out of a database access method:

    ```swift
    // Wrong
    let cursor = try dbQueue.read { db in
        try Player.fetchCursor(db, ...)
    }
    while let player = try cursor.next() { ... }
    ```

    Conversely, arrays and sets may be consumed on any thread:

    ```swift
    // OK
    let array = try dbQueue.read { db in
        try Player.fetchAll(db, ...)
    }
    for player in array { ... }
    ```

- **Cursors can be iterated only one time.** Arrays and sets can be iterated many times.

- **Cursors iterate database results in a lazy fashion**, and don't consume much memory. Arrays and sets contain copies of database values, and may take a lot of memory when there are many fetched results.

- **Cursors are granted with direct access to SQLite,** unlike arrays and sets that have to take the time to copy database values. If you are after extra performance, you may prefer cursors.

- **Cursors can feed Swift collections.**

    You will most of the time use `fetchAll` or `fetchSet` when you want an array or a set. For more specific needs, you may prefer one of the initializers below. All of them accept an extra optional `minimumCapacity` argument which helps optimize your app when you have an idea of the number of elements in a cursor (the built-in `fetchAll` and `fetchSet` do not perform such an optimization).

    **Arrays** and all types conforming to `RangeReplaceableCollection`:

    ```swift
    // [String]
    let cursor = try String.fetchCursor(db, ...)
    let array = try Array(cursor)
    ```

    **Sets**:

    ```swift
    // Set<Int>
    let cursor = try Int.fetchCursor(db, ...)
    let set = try Set(cursor)
    ```

    **Dictionaries**:

    ```swift
    // [Int64: [Player]]
    let cursor = try Player.fetchCursor(db)
    let dictionary = try Dictionary(grouping: cursor, by: { $0.teamID })

    // [Int64: Player]
    let cursor = try Player.fetchCursor(db).map { ($0.id, $0) }
    let dictionary = try Dictionary(uniqueKeysWithValues: cursor)
    ```

- **Cursors adopt the [Cursor](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/cursor) protocol, which looks a lot like standard [lazy sequences](https://developer.apple.com/reference/swift/lazysequenceprotocol) of Swift.** As such, cursors come with many convenience methods: `compactMap`, `contains`, `dropFirst`, `dropLast`, `drop(while:)`, `enumerated`, `filter`, `first`, `flatMap`, `forEach`, `joined`, `joined(separator:)`, `max`, `max(by:)`, `min`, `min(by:)`, `map`, `prefix`, `prefix(while:)`, `reduce`, `reduce(into:)`, `suffix`:

    ```swift
    // Prints all Github links
    try URL
        .fetchCursor(db, sql: "SELECT url FROM link")
        .filter { url in url.host == "github.com" }
        .forEach { url in print(url) }

    // An efficient cursor of coordinates:
    let locations = try Row
        .fetchCursor(db, sql: "SELECT latitude, longitude FROM place")
        .map { row in
            CLLocationCoordinate2D(latitude: row[0], longitude: row[1])
        }
    ```

- **Cursors are not Swift sequences.** That's because Swift sequences can't handle iteration errors, whereas reading SQLite results may fail at any time.

- **Cursors require a little care**:

    - Don't modify the results during a cursor iteration:

        ```swift
        // Undefined behavior
        while let player = try players.next() {
            try db.execute(sql: "DELETE ...")
        }
        ```

    - Don't turn a cursor of `Row` into an array or a set. You would not get the distinct rows you expect. To get an array of rows, use `Row.fetchAll(...)`. To get a set of rows, use `Row.fetchSet(...)`. Generally speaking, make sure you copy a row whenever you extract it from a cursor for later use: `row.copy()`.

If you don't see, or don't care about the difference, use arrays. If you care about memory and performance, use cursors when appropriate.


### Row Queries

- [Fetching Rows](#fetching-rows)
- [Column Values](#column-values)
- [DatabaseValue](#databasevalue)
- [Rows as Dictionaries](#rows-as-dictionaries)
- 📖 [`Row`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/row)


#### Fetching Rows

Fetch **cursors** of rows, **arrays**, **sets**, or **single** rows (see [fetching methods](#fetching-methods)):

```swift
try dbQueue.read { db in
    try Row.fetchCursor(db, sql: "SELECT ...", arguments: ...) // A Cursor of Row
    try Row.fetchAll(db, sql: "SELECT ...", arguments: ...)    // [Row]
    try Row.fetchSet(db, sql: "SELECT ...", arguments: ...)    // Set<Row>
    try Row.fetchOne(db, sql: "SELECT ...", arguments: ...)    // Row?

    let rows = try Row.fetchCursor(db, sql: "SELECT * FROM wine")
    while let row = try rows.next() {
        let name: String = row["name"]
        let color: Color = row["color"]
        print(name, color)
    }
}

let rows = try dbQueue.read { db in
    try Row.fetchAll(db, sql: "SELECT * FROM player")
}
```

Arguments are optional arrays or dictionaries that fill the positional `?` and colon-prefixed keys like `:name` in the query:

```swift
let rows = try Row.fetchAll(db,
    sql: "SELECT * FROM player WHERE name = ?",
    arguments: ["Arthur"])

let rows = try Row.fetchAll(db,
    sql: "SELECT * FROM player WHERE name = :name",
    arguments: ["name": "Arthur"])
```

See [Values](#values) for more information on supported argument types (Bool, Int, String, Date, Swift enums, etc.), and [`StatementArguments`] for a detailed documentation of SQLite arguments.

Unlike row arrays that contain copies of the database rows, row cursors are close to the SQLite metal, and require a little care:

> **Note**: **Don't turn a cursor of `Row` into an array or a set**. You would not get the distinct rows you expect. To get an array of rows, use `Row.fetchAll(...)`. To get a set of rows, use `Row.fetchSet(...)`. Generally speaking, make sure you copy a row whenever you extract it from a cursor for later use: `row.copy()`.


#### Column Values

**Read column values** by index or column name:

```swift
let name: String = row[0]              // 0 is the leftmost column
let name: String = row["name"]         // Leftmost matching column - lookup is case-insensitive
let name: String = row[Column("name")] // Using query interface's Column
```

Make sure to ask for an optional when the value may be NULL:

```swift
let name: String? = row["name"]
```

The `row[]` subscript returns the type you ask for. See [Values](#values) for more information on supported value types:

```swift
let bookCount: Int = row["bookCount"]
let bookCount64: Int64 = row["bookCount"]
let hasBooks: Bool = row["bookCount"] // false when 0

let string: String = row["date"] // "2015-09-11 18:14:15.123"
let date: Date = row["date"]     // Date
self.date = row["date"]          // Depends on the type of the property.
```

You can also use the `as` type casting operator:

```swift
row[...] as Int
row[...] as Int?
```

> **Warning**: avoid the `as!` and `as?` operators:
>
> ```swift
> if let int = row[...] as? Int { ... } // BAD - doesn't work
> if let int = row[...] as Int? { ... } // GOOD
> ```

Generally speaking, you can extract the type you need, provided it can be converted from the underlying SQLite value:

- **Successful conversions include:**

    - All numeric SQLite values to all numeric Swift types, and Bool (zero is the only false boolean).
    - Text SQLite values to Swift String.
    - Blob SQLite values to Foundation Data.

    See [Values](#values) for more information on supported types (Bool, Int, String, Date, Swift enums, etc.)

- **NULL returns nil.**

    ```swift
    let row = try Row.fetchOne(db, sql: "SELECT NULL")!
    row[0] as Int? // nil
    row[0] as Int  // fatal error: could not convert NULL to Int.
    ```

    There is one exception, though: the [DatabaseValue](#databasevalue) type:

    ```swift
    row[0] as DatabaseValue // DatabaseValue.null
    ```

- **Missing columns return nil.**

    ```swift
    let row = try Row.fetchOne(db, sql: "SELECT 'foo' AS foo")!
    row["missing"] as String? // nil
    row["missing"] as String  // fatal error: no such column: missing
    ```

    You can explicitly check for a column presence with the `hasColumn` method.

- **Invalid conversions throw a fatal error.**

    ```swift
    let row = try Row.fetchOne(db, sql: "SELECT 'Mom’s birthday'")!
    row[0] as String // "Mom’s birthday"
    row[0] as Date?  // fatal error: could not convert "Mom’s birthday" to Date.
    row[0] as Date   // fatal error: could not convert "Mom’s birthday" to Date.

    let row = try Row.fetchOne(db, sql: "SELECT 256")!
    row[0] as Int    // 256
    row[0] as UInt8? // fatal error: could not convert 256 to UInt8.
    row[0] as UInt8  // fatal error: could not convert 256 to UInt8.
    ```

    Those conversion fatal errors can be avoided with the [DatabaseValue](#databasevalue) type:

    ```swift
    let row = try Row.fetchOne(db, sql: "SELECT 'Mom’s birthday'")!
    let dbValue: DatabaseValue = row[0]
    if dbValue.isNull {
        // Handle NULL
    } else if let date = Date.fromDatabaseValue(dbValue) {
        // Handle valid date
    } else {
        // Handle invalid date
    }
    ```

    This extra verbosity is the consequence of having to deal with an untrusted database: you may consider fixing the content of your database instead. See [Fatal Errors](#fatal-errors) for more information.

- **SQLite has a weak type system, and provides [convenience conversions](https://www.sqlite.org/c3ref/column_blob.html) that can turn String to Int, Double to Blob, etc.**

    GRDB will sometimes let those conversions go through:

    ```swift
    let rows = try Row.fetchCursor(db, sql: "SELECT '20 small cigars'")
    while let row = try rows.next() {
        row[0] as Int // 20
    }
    ```

    Don't freak out: those conversions did not prevent SQLite from becoming the immensely successful database engine you want to use. And GRDB adds safety checks described just above. You can also prevent those convenience conversions altogether by using the [DatabaseValue](#databasevalue) type.


#### DatabaseValue

📖 [`DatabaseValue`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalue)

**`DatabaseValue` is an intermediate type between SQLite and your values, which gives information about the raw value stored in the database.**

You get `DatabaseValue` just like other value types:

```swift
let dbValue: DatabaseValue = row[0]
let dbValue: DatabaseValue? = row["name"] // nil if and only if column does not exist

// Check for NULL:
dbValue.isNull // Bool

// The stored value:
dbValue.storage.value // Int64, Double, String, Data, or nil

// All the five storage classes supported by SQLite:
switch dbValue.storage {
case .null:               print("NULL")
case .int64(let int64):   print("Int64: \(int64)")
case .double(let double): print("Double: \(double)")
case .string(let string): print("String: \(string)")
case .blob(let data):     print("Data: \(data)")
}
```

You can extract regular [values](#values) (Bool, Int, String, Date, Swift enums, etc.) from `DatabaseValue` with the [fromDatabaseValue()](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible/fromdatabasevalue(_:)-21zzv) method:

```swift
let dbValue: DatabaseValue = row["bookCount"]
let bookCount = Int.fromDatabaseValue(dbValue)     // Int?
let bookCount64 = Int64.fromDatabaseValue(dbValue) // Int64?
let hasBooks = Bool.fromDatabaseValue(dbValue)     // Bool?, false when 0

let dbValue: DatabaseValue = row["date"]
let string = String.fromDatabaseValue(dbValue) // "2015-09-11 18:14:15.123"
let date = Date.fromDatabaseValue(dbValue)     // Date?
```

`fromDatabaseValue` returns nil for invalid conversions:

```swift
let row = try Row.fetchOne(db, sql: "SELECT 'Mom’s birthday'")!
let dbValue: DatabaseValue = row[0]
let string = String.fromDatabaseValue(dbValue) // "Mom’s birthday"
let int = Int.fromDatabaseValue(dbValue)       // nil
let date = Date.fromDatabaseValue(dbValue)     // nil
```


#### Rows as Dictionaries

Row adopts the standard [RandomAccessCollection](https://developer.apple.com/documentation/swift/randomaccesscollection) protocol, and can be seen as a dictionary of [DatabaseValue](#databasevalue):

```swift
// All the (columnName, dbValue) tuples, from left to right:
for (columnName, dbValue) in row {
    ...
}
```

**You can build rows from dictionaries** (standard Swift dictionaries and NSDictionary). See [Values](#values) for more information on supported types:

```swift
let row: Row = ["name": "foo", "date": nil]
let row = Row(["name": "foo", "date": nil])
let row = Row(/* [AnyHashable: Any] */) // nil if invalid dictionary
```

Yet rows are not real dictionaries: they may contain duplicate columns:

```swift
let row = try Row.fetchOne(db, sql: "SELECT 1 AS foo, 2 AS foo")!
row.columnNames    // ["foo", "foo"]
row.databaseValues // [1, 2]
row["foo"]         // 1 (leftmost matching column)
for (columnName, dbValue) in row { ... } // ("foo", 1), ("foo", 2)
```

**When you build a dictionary from a row**, you have to disambiguate identical columns, and choose how to present database values. For example:

- A `[String: DatabaseValue]` dictionary that keeps leftmost value in case of duplicated column name:

    ```swift
    let dict = Dictionary(row, uniquingKeysWith: { (left, _) in left })
    ```

- A `[String: AnyObject]` dictionary which keeps rightmost value in case of duplicated column name. This dictionary is identical to FMResultSet's resultDictionary from FMDB. It contains NSNull values for null columns, and can be shared with Objective-C:

    ```swift
    let dict = Dictionary(
        row.map { (column, dbValue) in
            (column, dbValue.storage.value as AnyObject)
        },
        uniquingKeysWith: { (_, right) in right })
    ```

- A `[String: Any]` dictionary that can feed, for example, JSONSerialization:

    ```swift
    let dict = Dictionary(
        row.map { (column, dbValue) in
            (column, dbValue.storage.value)
        },
        uniquingKeysWith: { (left, _) in left })
    ```

See the documentation of [`Dictionary.init(_:uniquingKeysWith:)`](https://developer.apple.com/documentation/swift/dictionary/2892961-init) for more information.


### Value Queries

📖 [`DatabaseValueConvertible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible)

**Instead of rows, you can directly fetch values.** There are many supported [value types](#values) (Bool, Int, String, Date, Swift enums, etc.).

Like rows, fetch values as **cursors**, **arrays**, **sets**, or **single** values (see [fetching methods](#fetching-methods)). Values are extracted from the leftmost column of the SQL queries:

```swift
try dbQueue.read { db in
    try Int.fetchCursor(db, sql: "SELECT ...", arguments: ...) // A Cursor of Int
    try Int.fetchAll(db, sql: "SELECT ...", arguments: ...)    // [Int]
    try Int.fetchSet(db, sql: "SELECT ...", arguments: ...)    // Set<Int>
    try Int.fetchOne(db, sql: "SELECT ...", arguments: ...)    // Int?

    let maxScore = try Int.fetchOne(db, sql: "SELECT MAX(score) FROM player") // Int?
    let names = try String.fetchAll(db, sql: "SELECT name FROM player")       // [String]
}
```

`Int.fetchOne` returns nil in two cases: either the SELECT statement yielded no row, or one row with a NULL value:

```swift
// No row:
try Int.fetchOne(db, sql: "SELECT 42 WHERE FALSE") // nil

// One row with a NULL value:
try Int.fetchOne(db, sql: "SELECT NULL") // nil

// One row with a non-NULL value:
try Int.fetchOne(db, sql: "SELECT 42") // 42
```

For requests which may contain NULL, fetch optionals:

```swift
try dbQueue.read { db in
    try Optional<Int>.fetchCursor(db, sql: "SELECT ...", arguments: ...) // A Cursor of Int?
    try Optional<Int>.fetchAll(db, sql: "SELECT ...", arguments: ...)    // [Int?]
    try Optional<Int>.fetchSet(db, sql: "SELECT ...", arguments: ...)    // Set<Int?>
}
```

> :bulb: **Tip**: One advanced use case, when you fetch one value, is to distinguish between a statement that yields no row, and a statement that yields one row with a NULL value. To do so, use `Optional<Int>.fetchOne`, which returns a double optional `Int??`:
>
> ```swift
> // No row:
> try Optional<Int>.fetchOne(db, sql: "SELECT 42 WHERE FALSE") // .none
> // One row with a NULL value:
> try Optional<Int>.fetchOne(db, sql: "SELECT NULL") // .some(.none)
> // One row with a non-NULL value:
> try Optional<Int>.fetchOne(db, sql: "SELECT 42") // .some(.some(42))
> ```

There are many supported value types (Bool, Int, String, Date, Swift enums, etc.). See [Values](#values) for more information.


## Values

GRDB ships with built-in support for the following value types:

- **Swift Standard Library**: Bool, Double, Float, all signed and unsigned integer types, String, [Swift enums](#swift-enums).

- **Foundation**: [Data](#data-and-memory-savings), [Date](#date-and-datecomponents), [DateComponents](#date-and-datecomponents), [Decimal](#nsnumber-nsdecimalnumber-and-decimal), NSNull, [NSNumber](#nsnumber-nsdecimalnumber-and-decimal), NSString, URL, [UUID](#uuid).

- **CoreGraphics**: CGFloat.

- **[DatabaseValue](#databasevalue)**, the type which gives information about the raw value stored in the database.

- **Full-Text Patterns**: [FTS3Pattern](Documentation/FullTextSearch.md#fts3pattern) and [FTS5Pattern](Documentation/FullTextSearch.md#fts5pattern).

- Generally speaking, all types that adopt the [`DatabaseValueConvertible`] protocol.

Values can be used as [statement arguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments):

```swift
let url: URL = ...
let verified: Bool = ...
try db.execute(
    sql: "INSERT INTO link (url, verified) VALUES (?, ?)",
    arguments: [url, verified])
```

Values can be [extracted from rows](#column-values):

```swift
let rows = try Row.fetchCursor(db, sql: "SELECT * FROM link")
while let row = try rows.next() {
    let url: URL = row["url"]
    let verified: Bool = row["verified"]
}
```

Values can be [directly fetched](#value-queries):

```swift
let urls = try URL.fetchAll(db, sql: "SELECT url FROM link") // [URL]
```

Use values in [Records](#records):

```swift
struct Link: FetchableRecord {
    var url: URL
    var isVerified: Bool

    init(row: Row) {
        url = row["url"]
        isVerified = row["verified"]
    }
}
```

Use values in the [query interface](#the-query-interface):

```swift
let url: URL = ...
let link = try Link.filter(Column("url") == url).fetchOne(db)
```


### Data (and Memory Savings)

**Data** suits the BLOB SQLite columns. It can be stored and fetched from the database just like other [values](#values):

```swift
let rows = try Row.fetchCursor(db, sql: "SELECT data, ...")
while let row = try rows.next() {
    let data: Data = row["data"]
}
```

At each step of the request iteration, the `row[]` subscript creates *two copies* of the database bytes: one fetched by SQLite, and another, stored in the Swift Data value.

**You have the opportunity to save memory** by not copying the data fetched by SQLite:

```swift
while let row = try rows.next() {
    try row.withUnsafeData(name: "data") { (data: Data?) in
        ...
    }
}
```

The non-copied data does not live longer than the iteration step: make sure that you do not use it past this point.


### Date and DateComponents

[**Date**](#date) and [**DateComponents**](#datecomponents) can be stored and fetched from the database.

Here is how GRDB supports the various [date formats](https://www.sqlite.org/lang_datefunc.html) supported by SQLite:

| SQLite format               | Date               | DateComponents |
|:--------------------------- |:------------------:|:--------------:|
| YYYY-MM-DD                  | Read ¹             | Read / Write   |
| YYYY-MM-DD HH:MM            | Read ¹ ²           | Read ² / Write |
| YYYY-MM-DD HH:MM:SS         | Read ¹ ²           | Read ² / Write |
| YYYY-MM-DD HH:MM:SS.SSS     | Read ¹ ² / Write ¹ | Read ² / Write |
| YYYY-MM-DD**T**HH:MM        | Read ¹ ²           | Read ²         |
| YYYY-MM-DD**T**HH:MM:SS     | Read ¹ ²           | Read ²         |
| YYYY-MM-DD**T**HH:MM:SS.SSS | Read ¹ ²           | Read ²         |
| HH:MM                       |                    | Read ² / Write |
| HH:MM:SS                    |                    | Read ² / Write |
| HH:MM:SS.SSS                |                    | Read ² / Write |
| Timestamps since unix epoch | Read ³             |                |
| `now`                       |                    |                |

¹ Missing components are assumed to be zero. Dates are stored and read in the UTC time zone, unless the format is followed by a timezone indicator ⁽²⁾.

² This format may be optionally followed by a timezone indicator of the form `[+-]HH:MM` or just `Z`.

³ GRDB 2+ interprets numerical values as timestamps that fuel `Date(timeIntervalSince1970:)`. Previous GRDB versions used to interpret numbers as [julian days](https://en.wikipedia.org/wiki/Julian_day). Julian days are still supported, with the `Date(julianDay:)` initializer.

> **Warning**: the range of valid years in the SQLite date formats is 0000-9999. You will need to pick another date format when your application needs to process years outside of this range. See the following chapters.


#### Date

**Date** can be stored and fetched from the database just like other [values](#values):

```swift
try db.execute(
    sql: "INSERT INTO player (creationDate, ...) VALUES (?, ...)",
    arguments: [Date(), ...])

let row = try Row.fetchOne(db, ...)!
let creationDate: Date = row["creationDate"]
```

Dates are stored using the format "YYYY-MM-DD HH:MM:SS.SSS" in the UTC time zone. It is precise to the millisecond.

> **Note**: this format was chosen because it is the only format that is:
>
> - Comparable (`ORDER BY date` works)
> - Comparable with the SQLite keyword CURRENT_TIMESTAMP (`WHERE date > CURRENT_TIMESTAMP` works)
> - Able to feed [SQLite date & time functions](https://www.sqlite.org/lang_datefunc.html)
> - Precise enough
>
> **Warning**: the range of valid years in the SQLite date format is 0000-9999. You will experience problems with years outside of this range, such as decoding errors, or invalid date computations with [SQLite date & time functions](https://www.sqlite.org/lang_datefunc.html).

Some applications may prefer another date format:

- Some may prefer ISO-8601, with a `T` separator.
- Some may prefer ISO-8601, with a time zone.
- Some may need to store years beyond the 0000-9999 range.
- Some may need sub-millisecond precision.
- Some may need exact `Date` roundtrip.
- Etc.

**You should think twice before choosing a different date format:**

- ISO-8601 is about *exchange and communication*, when SQLite is about *storage and data manipulation*. Sharing the same representation in your database and in JSON files only provides a superficial convenience, and should be the least of your priorities. Don't store dates as ISO-8601 without understanding what you lose. For example, ISO-8601 time zones forbid database-level date comparison.
- Sub-millisecond precision and exact `Date` roundtrip are not as obvious needs as it seems at first sight. Dates generally don't precisely roundtrip as soon as they leave your application anyway, because the other systems your app communicates with use their own date representation (the Android version of your app, the server your application is talking to, etc.) On top of that, `Date` comparison is at least as hard and nasty as [floating point comparison](https://www.google.com/search?q=floating+point+comparison+is+hard).

The customization of date format is explicit. For example:

```swift
let date = Date()
let timeInterval = date.timeIntervalSinceReferenceDate
try db.execute(
    sql: "INSERT INTO player (creationDate, ...) VALUES (?, ...)",
    arguments: [timeInterval, ...])

if let row = try Row.fetchOne(db, ...) {
    let timeInterval: TimeInterval = row["creationDate"]
    let creationDate = Date(timeIntervalSinceReferenceDate: timeInterval)
}
```

See also [Codable Records] for more date customization options, and [`DatabaseValueConvertible`] if you want to define a Date-wrapping type with customized database representation.


#### DateComponents

DateComponents is indirectly supported, through the **DatabaseDateComponents** helper type.

DatabaseDateComponents reads date components from all [date formats supported by SQLite](https://www.sqlite.org/lang_datefunc.html), and stores them in the format of your choice, from HH:MM to YYYY-MM-DD HH:MM:SS.SSS.

> **Warning**: the range of valid years is 0000-9999. You will experience problems with years outside of this range, such as decoding errors, or invalid date computations with [SQLite date & time functions](https://www.sqlite.org/lang_datefunc.html). See [Date](#date) for more information.

DatabaseDateComponents can be stored and fetched from the database just like other [values](#values):

```swift
var components = DateComponents()
components.year = 1973
components.month = 9
components.day = 18

// Store "1973-09-18"
let dbComponents = DatabaseDateComponents(components, format: .YMD)
try db.execute(
    sql: "INSERT INTO player (birthDate, ...) VALUES (?, ...)",
    arguments: [dbComponents, ...])

// Read "1973-09-18"
let row = try Row.fetchOne(db, sql: "SELECT birthDate ...")!
let dbComponents: DatabaseDateComponents = row["birthDate"]
dbComponents.format         // .YMD (the actual format found in the database)
dbComponents.dateComponents // DateComponents
```


### NSNumber, NSDecimalNumber, and Decimal

**NSNumber** and **Decimal** can be stored and fetched from the database just like other [values](#values).

Here is how GRDB supports the various data types supported by SQLite:

|                 | Integer      | Double       | String       |
|:--------------- |:------------:|:------------:|:------------:|
| NSNumber        | Read / Write | Read / Write | Read         |
| NSDecimalNumber | Read / Write | Read / Write | Read         |
| Decimal         | Read         | Read         | Read / Write |

- All three types can decode database integers and doubles:

    ```swift
    let number = try NSNumber.fetchOne(db, sql: "SELECT 10")          // NSNumber
    let number = try NSDecimalNumber.fetchOne(db, sql: "SELECT 1.23") // NSDecimalNumber
    let number = try Decimal.fetchOne(db, sql: "SELECT -100")         // Decimal
    ```

- All three types decode database strings as decimal numbers:

    ```swift
    let number = try NSNumber.fetchOne(db, sql: "SELECT '10'")          // NSDecimalNumber (sic)
    let number = try NSDecimalNumber.fetchOne(db, sql: "SELECT '1.23'") // NSDecimalNumber
    let number = try Decimal.fetchOne(db, sql: "SELECT '-100'")         // Decimal
    ```

- `NSNumber` and `NSDecimalNumber` send 64-bit signed integers and doubles in the database:

    ```swift
    // INSERT INTO transfer VALUES (10)
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [NSNumber(value: 10)])

    // INSERT INTO transfer VALUES (10.0)
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [NSNumber(value: 10.0)])

    // INSERT INTO transfer VALUES (10)
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [NSDecimalNumber(string: "10.0")])

    // INSERT INTO transfer VALUES (10.5)
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [NSDecimalNumber(string: "10.5")])
    ```

    > **Warning**: since SQLite does not support decimal numbers, sending a non-integer `NSDecimalNumber` can result in a loss of precision during the conversion to double.
    >
    > Instead of sending non-integer `NSDecimalNumber` to the database, you may prefer:
    >
    > - Send `Decimal` instead (those store decimal strings in the database).
    > - Send integers instead (for example, store amounts of cents instead of amounts of Euros).

- `Decimal` sends decimal strings in the database:

    ```swift
    // INSERT INTO transfer VALUES ('10')
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [Decimal(10)])

    // INSERT INTO transfer VALUES ('10.5')
    try db.execute(sql: "INSERT INTO transfer VALUES (?)", arguments: [Decimal(string: "10.5")!])
    ```


### UUID

**UUID** can be stored and fetched from the database just like other [values](#values).

GRDB stores UUIDs as 16-byte data blobs, and decodes them from both 16-byte data blobs and strings such as "E621E1F8-C36C-495A-93FC-0C247A3E6E5F".
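For example, here is a minimal sketch, assuming a hypothetical `player` table whose `id` column stores the 16-byte blob:

```swift
let id = UUID()

// Stored as a 16-byte BLOB:
try db.execute(
    sql: "INSERT INTO player (id, name) VALUES (?, ?)",
    arguments: [id, "Arthur"])

// Decoded from either a BLOB or a string representation:
let row = try Row.fetchOne(db, sql: "SELECT id, name FROM player")!
let playerId: UUID = row["id"]
```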
### Swift Enums

**Swift enums** and generally all types that adopt the [RawRepresentable](https://developer.apple.com/library/tvos/documentation/Swift/Reference/Swift_RawRepresentable_Protocol/index.html) protocol can be stored and fetched from the database just like their raw [values](#values):

```swift
enum Color : Int {
    case red, white, rose
}

enum Grape : String {
    case chardonnay, merlot, riesling
}

// Declare empty DatabaseValueConvertible adoption
extension Color : DatabaseValueConvertible { }
extension Grape : DatabaseValueConvertible { }

// Store
try db.execute(
    sql: "INSERT INTO wine (grape, color) VALUES (?, ?)",
    arguments: [Grape.merlot, Color.red])

// Read
let rows = try Row.fetchCursor(db, sql: "SELECT * FROM wine")
while let row = try rows.next() {
    let grape: Grape = row["grape"]
    let color: Color = row["color"]
}
```

**When a database value does not match any enum case**, you get a fatal error. This fatal error can be avoided with the [DatabaseValue](#databasevalue) type:

```swift
let row = try Row.fetchOne(db, sql: "SELECT 'syrah'")!

row[0] as String // "syrah"
row[0] as Grape? // fatal error: could not convert "syrah" to Grape.
row[0] as Grape  // fatal error: could not convert "syrah" to Grape.

let dbValue: DatabaseValue = row[0]
if dbValue.isNull {
    // Handle NULL
} else if let grape = Grape.fromDatabaseValue(dbValue) {
    // Handle valid grape
} else {
    // Handle unknown grape
}
```


## Custom SQL Functions and Aggregates

**SQLite lets you define SQL functions and aggregates.**

A custom SQL function or aggregate extends SQLite:

```sql
SELECT reverse(name) FROM player;   -- custom function
SELECT maxLength(name) FROM player; -- custom aggregate
```

- [Custom SQL Functions](#custom-sql-functions)
- [Custom Aggregates](#custom-aggregates)


### Custom SQL Functions

📖 [`DatabaseFunction`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasefunction)

A *function* argument takes an array of [DatabaseValue](#databasevalue), and returns any valid [value](#values) (Bool, Int, String, Date, Swift enums, etc.). The number of database values is guaranteed to be *argumentCount*.

SQLite has the opportunity to perform additional optimizations when functions are "pure", which means that their result only depends on their arguments. So make sure to set the *pure* argument to true when possible.

```swift
let reverse = DatabaseFunction("reverse", argumentCount: 1, pure: true) { (values: [DatabaseValue]) in
    // Extract string value, if any...
    guard let string = String.fromDatabaseValue(values[0]) else {
        return nil
    }
    // ... and return reversed string:
    return String(string.reversed())
}
```

You make a function available to a database connection through its configuration:

```swift
var config = Configuration()
config.prepareDatabase { db in
    db.add(function: reverse)
}
let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)

try dbQueue.read { db in
    // "oof"
    try String.fetchOne(db, sql: "SELECT reverse('foo')")!
}
```


**Functions can take a variable number of arguments:**

When you don't provide any explicit *argumentCount*, the function can take any number of arguments:

```swift
let averageOf = DatabaseFunction("averageOf", pure: true) { (values: [DatabaseValue]) in
    let doubles = values.compactMap { Double.fromDatabaseValue($0) }
    return doubles.reduce(0, +) / Double(doubles.count)
}
db.add(function: averageOf)

// 2.0
try Double.fetchOne(db, sql: "SELECT averageOf(1, 2, 3)")!
```


**Functions can throw:**

```swift
let sqrt = DatabaseFunction("sqrt", argumentCount: 1, pure: true) { (values: [DatabaseValue]) in
    guard let double = Double.fromDatabaseValue(values[0]) else {
        return nil
    }
    guard double >= 0 else {
        throw DatabaseError(message: "invalid negative number")
    }
    return sqrt(double)
}
db.add(function: sqrt)

// SQLite error 1 with statement `SELECT sqrt(-1)`: invalid negative number
try Double.fetchOne(db, sql: "SELECT sqrt(-1)")!
```


**Use custom functions in the [query interface](#the-query-interface):**

```swift
// SELECT reverse("name") FROM player
Player.select(reverse(nameColumn))
```


**GRDB ships with built-in SQL functions that perform unicode-aware string transformations.** See [Unicode](#unicode).


### Custom Aggregates

📖 [`DatabaseFunction`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasefunction), [`DatabaseAggregate`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseaggregate)

Before registering a custom aggregate, you need to define a type that adopts the `DatabaseAggregate` protocol:

```swift
protocol DatabaseAggregate {
    // Initializes an aggregate
    init()

    // Called at each step of the aggregation
    mutating func step(_ dbValues: [DatabaseValue]) throws

    // Returns the final result
    func finalize() throws -> DatabaseValueConvertible?
}
```

For example:

```swift
struct MaxLength : DatabaseAggregate {
    var maxLength: Int = 0

    mutating func step(_ dbValues: [DatabaseValue]) {
        // At each step, extract string value, if any...
        guard let string = String.fromDatabaseValue(dbValues[0]) else {
            return
        }
        // ... and update the result
        let length = string.count
        if length > maxLength {
            maxLength = length
        }
    }

    func finalize() -> DatabaseValueConvertible? {
        maxLength
    }
}

let maxLength = DatabaseFunction(
    "maxLength",
    argumentCount: 1,
    pure: true,
    aggregate: MaxLength.self)
```

Like [custom SQL Functions](#custom-sql-functions), you make an aggregate function available to a database connection through its configuration:

```swift
var config = Configuration()
config.prepareDatabase { db in
    db.add(function: maxLength)
}
let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)

try dbQueue.read { db in
    // Some Int
    try Int.fetchOne(db, sql: "SELECT maxLength(name) FROM player")!
}
```

The `step` method of the aggregate takes an array of [DatabaseValue](#databasevalue). This array contains as many values as the *argumentCount* parameter (or any number of values, when *argumentCount* is omitted).

The `finalize` method of the aggregate returns the final aggregated [value](#values) (Bool, Int, String, Date, Swift enums, etc.).

SQLite has the opportunity to perform additional optimizations when aggregates are "pure", which means that their result only depends on their inputs. So make sure to set the *pure* argument to true when possible.
**Use custom aggregates in the [query interface](#the-query-interface):**

```swift
// SELECT maxLength("name") FROM player
let request = Player.select(maxLength.apply(nameColumn))
try Int.fetchOne(db, request) // Int?
```


## Database Schema Introspection

GRDB comes with a set of schema introspection methods:

```swift
try dbQueue.read { db in
    // Bool, true if the table exists
    try db.tableExists("player")

    // [ColumnInfo], the columns in the table
    try db.columns(in: "player")

    // PrimaryKeyInfo
    try db.primaryKey("player")

    // [ForeignKeyInfo], the foreign keys defined on the table
    try db.foreignKeys(on: "player")

    // [IndexInfo], the indexes defined on the table
    try db.indexes(on: "player")

    // Bool, true if column(s) is a unique key (primary key or unique index)
    try db.table("player", hasUniqueKey: ["email"])
}

// Bool, true if argument is the name of an internal SQLite table
Database.isSQLiteInternalTable(...)

// Bool, true if argument is the name of an internal GRDB table
Database.isGRDBInternalTable(...)
```

For more information, see [`tableExists(_:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/tableexists(_:)) and related methods.


## Raw SQLite Pointers

**If not all SQLite APIs are exposed in GRDB, you can still use the [SQLite C Interface](https://www.sqlite.org/c3ref/intro.html) and call [SQLite C functions](https://www.sqlite.org/c3ref/funclist.html).**

Those functions are embedded right into the GRDB module, regardless of the underlying SQLite implementation (system SQLite, [SQLCipher](#encryption), or [custom SQLite build]):

```swift
import GRDB

let sqliteVersion = String(cString: sqlite3_libversion())
```

Raw pointers to database connections and statements are available through the `Database.sqliteConnection` and `Statement.sqliteStatement` properties:

```swift
try dbQueue.read { db in
    // The raw pointer to a database connection:
    let sqliteConnection = db.sqliteConnection

    // The raw pointer to a statement:
    let statement = try db.makeStatement(sql: "SELECT ...")
    let sqliteStatement = statement.sqliteStatement
}
```

> **Note**
>
> - Those pointers are owned by GRDB: don't close connections or finalize statements created by GRDB.
> - GRDB opens SQLite connections in the "[multi-thread mode](https://www.sqlite.org/threadsafe.html)", which (oddly) means that **they are not thread-safe**. Make sure you touch raw databases and statements inside their dedicated dispatch queues.
> - Use the raw SQLite C Interface at your own risk. GRDB won't prevent you from shooting yourself in the foot.


Records
=======

**On top of the [SQLite API](#sqlite-api), GRDB provides protocols and a class** that help you manipulate database rows as regular objects named "records":

```swift
try dbQueue.write { db in
    if var place = try Place.fetchOne(db, id: 1) {
        place.isFavorite = true
        try place.update(db)
    }
}
```

Of course, you need to open a [database connection], and [create database tables](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema) first.

To define your custom records, you subclass the ready-made `Record` class, or you extend your structs and classes with protocols that come with focused sets of features: fetching methods, persistence methods, record comparison...

Extending structs with record protocols is more "swifty". Subclassing the Record class is more "classic". You can choose either way.
See some [examples of record definitions](#examples-of-record-definitions), and the [list of record methods](#list-of-record-methods) for an overview.
+
+> **Note**: if you are familiar with Core Data's NSManagedObject or Realm's Object, you may experience a cultural shock: GRDB records are not uniqued, do not auto-update, and do not lazy-load. This is both a purpose and a consequence of protocol-oriented programming. You should read [How to build an iOS application with SQLite and GRDB.swift](https://medium.com/@gwendal.roue/how-to-build-an-ios-application-with-sqlite-and-grdb-swift-d023a06c29b3) for a general introduction.
+>
+> :bulb: **Tip**: after you have read this chapter, check the [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) Guide.
+>
+> :bulb: **Tip**: see the [Demo Applications] for sample apps that use records.
+
+**Overview**
+
+- [Inserting Records](#inserting-records)
+- [Fetching Records](#fetching-records)
+- [Updating Records](#updating-records)
+- [Deleting Records](#deleting-records)
+- [Counting Records](#counting-records)
+
+**Protocols and the Record Class**
+
+- [Record Protocols Overview](#record-protocols-overview)
+- [FetchableRecord Protocol](#fetchablerecord-protocol)
+- [TableRecord Protocol](#tablerecord-protocol)
+- [PersistableRecord Protocol](#persistablerecord-protocol)
+    - [Persistence Methods]
+    - [Persistence Methods and the `RETURNING` clause]
+    - [Persistence Callbacks]
+- [Identifiable Records]
+- [Codable Records]
+- [Record Class](#record-class)
+- [Record Comparison]
+- [Record Customization Options]
+- [Record Timestamps and Transaction Date]
+
+**Records in a Glance**
+
+- [Examples of Record Definitions](#examples-of-record-definitions)
+- [List of Record Methods](#list-of-record-methods)
+
+
+### Inserting Records
+
+To insert a record in the database, call the `insert` method:
+
+```swift
+let player = Player(name: "Arthur", email: "arthur@example.com")
+try player.insert(db)
+```
+
+:point_right: `insert` is available for subclasses of the [Record](#record-class) class, and types that adopt the [PersistableRecord] protocol.
+
+
+### Fetching Records
+
+To fetch records from the database, call a [fetching method](#fetching-methods):
+
+```swift
+let arthur = try Player.fetchOne(db,            // Player?
+    sql: "SELECT * FROM player WHERE name = ?",
+    arguments: ["Arthur"])
+
+let bestPlayers = try Player                    // [Player]
+    .order(Column("score").desc)
+    .limit(10)
+    .fetchAll(db)
+
+let spain = try Country.fetchOne(db, id: "ES")  // Country?
+let italy = try Country.find(db, id: "IT")      // Country
+```
+
+:point_right: Fetching from raw SQL is available for subclasses of the [Record](#record-class) class, and types that adopt the [FetchableRecord] protocol.
+
+:point_right: Fetching without SQL, using the [query interface](#the-query-interface), is available for subclasses of the [Record](#record-class) class, and types that adopt both the [FetchableRecord] and [TableRecord] protocols.
+
+
+### Updating Records
+
+To update a record in the database, call the `update` method:
+
+```swift
+var player: Player = ...
+player.score = 1000
+try player.update(db)
+```
+
+It is possible to [avoid useless updates](#record-comparison):
+
+```swift
+// does not hit the database if score has not changed
+try player.updateChanges(db) {
+    $0.score = 1000
+}
+```
+
+See the [query interface](#the-query-interface) for batch updates:
+
+```swift
+try Player
+    .filter(Column("team") == "red")
+    .updateAll(db, Column("score") += 1)
+```
+
+:point_right: update methods are available for subclasses of the [Record](#record-class) class, and types that adopt the [PersistableRecord] protocol. Batch updates are available on the [TableRecord] protocol.
+
+
+### Deleting Records
+
+To delete a record in the database, call the `delete` method:
+
+```swift
+let player: Player = ...
+try player.delete(db)
+```
+
+You can also delete by primary key, unique key, or perform batch deletes (see [Delete Requests](#delete-requests)):
+
+```swift
+try Player.deleteOne(db, id: 1)
+try Player.deleteOne(db, key: ["email": "arthur@example.com"])
+try Country.deleteAll(db, ids: ["FR", "US"])
+try Player
+    .filter(Column("email") == nil)
+    .deleteAll(db)
+```
+
+:point_right: delete methods are available for subclasses of the [Record](#record-class) class, and types that adopt the [PersistableRecord] protocol. Batch deletes are available on the [TableRecord] protocol.
+
+
+### Counting Records
+
+To count records, call the `fetchCount` method:
+
+```swift
+let playerCount: Int = try Player.fetchCount(db)
+
+let playerWithEmailCount: Int = try Player
+    .filter(Column("email") != nil)
+    .fetchCount(db)
+```
+
+:point_right: `fetchCount` is available for subclasses of the [Record](#record-class) class, and types that adopt the [TableRecord] protocol.
+
+
+Details follow:
+
+- [Record Protocols Overview](#record-protocols-overview)
+- [FetchableRecord Protocol](#fetchablerecord-protocol)
+- [TableRecord Protocol](#tablerecord-protocol)
+- [PersistableRecord Protocol](#persistablerecord-protocol)
+- [Identifiable Records]
+- [Codable Records]
+- [Record Class](#record-class)
+- [Record Comparison]
+- [Record Customization Options]
+- [Examples of Record Definitions](#examples-of-record-definitions)
+- [List of Record Methods](#list-of-record-methods)
+
+
+## Record Protocols Overview
+
+**GRDB ships with three record protocols**. Your own types will adopt one or several of them, according to the abilities you want to extend your types with.
+
+- [FetchableRecord] is able to **decode database rows**.
+
+    ```swift
+    struct Place: FetchableRecord { ... }
+    let places = try dbQueue.read { db in
+        try Place.fetchAll(db, sql: "SELECT * FROM place")
+    }
+    ```
+
+    > :bulb: **Tip**: `FetchableRecord` can derive its implementation from the standard `Decodable` protocol. See [Codable Records] for more information.
+
+    `FetchableRecord` can decode database rows, but it is not able to build SQL requests for you. For that, you also need `TableRecord`:
+
+- [TableRecord] is able to **generate SQL queries**:
+
+    ```swift
+    struct Place: TableRecord { ... }
+    let placeCount = try dbQueue.read { db in
+        // Generates and runs `SELECT COUNT(*) FROM place`
+        try Place.fetchCount(db)
+    }
+    ```
+
+    When a type adopts both `TableRecord` and `FetchableRecord`, it can load from those requests:
+
+    ```swift
+    struct Place: TableRecord, FetchableRecord { ...
}
+    try dbQueue.read { db in
+        let places = try Place.order(Column("title")).fetchAll(db)
+        let paris = try Place.fetchOne(db, id: 1)
+    }
+    ```
+
+- [PersistableRecord] is able to **write**: it can create, update, and delete rows in the database:
+
+    ```swift
+    struct Place : PersistableRecord { ... }
+    try dbQueue.write { db in
+        try Place.deleteOne(db, id: 1)
+        try Place(...).insert(db)
+    }
+    ```
+
+    A persistable record can also [compare](#record-comparison) itself against other records, and avoid useless database updates.
+
+    > :bulb: **Tip**: `PersistableRecord` can derive its implementation from the standard `Encodable` protocol. See [Codable Records] for more information.
+
+
+## FetchableRecord Protocol
+
+📖 [`FetchableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/fetchablerecord)
+
+**The FetchableRecord protocol grants fetching methods to any type** that can be built from a database row:
+
+```swift
+protocol FetchableRecord {
+    /// Row initializer
+    init(row: Row) throws
+}
+```
+
+**To use FetchableRecord**, subclass the [Record](#record-class) class, or adopt it explicitly. For example:
+
+```swift
+struct Place {
+    var id: Int64?
+    var title: String
+    var coordinate: CLLocationCoordinate2D
+}
+
+extension Place : FetchableRecord {
+    init(row: Row) {
+        id = row["id"]
+        title = row["title"]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row["latitude"],
+            longitude: row["longitude"])
+    }
+}
+```
+
+Rows also accept column enums:
+
+```swift
+extension Place : FetchableRecord {
+    enum Columns: String, ColumnExpression {
+        case id, title, latitude, longitude
+    }
+    
+    init(row: Row) {
+        id = row[Columns.id]
+        title = row[Columns.title]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row[Columns.latitude],
+            longitude: row[Columns.longitude])
+    }
+}
+```
+
+See [column values](#column-values) for more information about the `row[]` subscript.
+
+When your record type adopts the standard Decodable protocol, you don't have to provide the implementation for `init(row:)`. See [Codable Records] for more information:
+
+```swift
+// That's all
+struct Player: Decodable, FetchableRecord {
+    var id: Int64
+    var name: String
+    var score: Int
+}
+```
+
+FetchableRecord allows adopting types to be fetched from SQL queries:
+
+```swift
+try Place.fetchCursor(db, sql: "SELECT ...", arguments:...) // A Cursor of Place
+try Place.fetchAll(db, sql: "SELECT ...", arguments:...)    // [Place]
+try Place.fetchSet(db, sql: "SELECT ...", arguments:...)    // Set<Place>
+try Place.fetchOne(db, sql: "SELECT ...", arguments:...)    // Place?
+```
+
+See [fetching methods](#fetching-methods) for information about the `fetchCursor`, `fetchAll`, `fetchSet` and `fetchOne` methods. See [`StatementArguments`] for more information about the query arguments.
+
+> **Note**: for performance reasons, the same row argument to `init(row:)` is reused during the iteration of a fetch query. If you want to keep the row for later use, make sure to store a copy: `self.row = row.copy()`.
+
+> **Note**: The `FetchableRecord.init(row:)` initializer fits the needs of most applications. But some applications are more demanding than others. When FetchableRecord does not exactly provide the support you need, have a look at the [Beyond FetchableRecord] chapter.
+
+
+## TableRecord Protocol
+
+📖 [`TableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord)
+
+**The TableRecord protocol** generates SQL for you.
To use TableRecord, subclass the [Record](#record-class) class, or adopt it explicitly:
+
+```swift
+protocol TableRecord {
+    static var databaseTableName: String { get }
+    static var databaseSelection: [any SQLSelectable] { get }
+}
+```
+
+The `databaseSelection` type property is optional, and documented in the [Columns Selected by a Request] chapter.
+
+The `databaseTableName` type property is the name of a database table. By default, it is derived from the type name:
+
+```swift
+struct Place: TableRecord { }
+print(Place.databaseTableName) // prints "place"
+```
+
+For example:
+
+- Place: `place`
+- Country: `country`
+- PostalAddress: `postalAddress`
+- HTTPRequest: `httpRequest`
+- TOEFL: `toefl`
+
+You can still provide a custom table name:
+
+```swift
+struct Place: TableRecord {
+    static let databaseTableName = "location"
+}
+print(Place.databaseTableName) // prints "location"
+```
+
+Subclasses of the [Record](#record-class) class must always override their superclass's `databaseTableName` property:
+
+```swift
+class Place: Record {
+    override class var databaseTableName: String { "place" }
+}
+print(Place.databaseTableName) // prints "place"
+```
+
+When a type adopts both TableRecord and [FetchableRecord](#fetchablerecord-protocol), it can be fetched using the [query interface](#the-query-interface):
+
+```swift
+// SELECT * FROM place WHERE name = 'Paris'
+let paris = try Place.filter(nameColumn == "Paris").fetchOne(db)
+```
+
+TableRecord can also deal with primary and unique keys: see [Fetching by Key](#fetching-by-key) and [Testing for Record Existence](#testing-for-record-existence).
+
+
+## PersistableRecord Protocol
+
+📖 [`EncodableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/encodablerecord), [`MutablePersistableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/mutablepersistablerecord), [`PersistableRecord`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/persistablerecord)
+
+**GRDB record types can create, update, and delete rows in the database.**
+
+Those abilities are granted by three protocols:
+
+```swift
+// Defines how a record encodes itself into the database
+protocol EncodableRecord {
+    /// Defines the values persisted in the database
+    func encode(to container: inout PersistenceContainer) throws
+}
+
+// Adds persistence methods
+protocol MutablePersistableRecord: TableRecord, EncodableRecord {
+    /// Optional method that lets your adopting type store its rowID upon
+    /// successful insertion. Don't call it directly: it is called for you.
+    mutating func didInsert(_ inserted: InsertionSuccess)
+}
+
+// Adds immutability
+protocol PersistableRecord: MutablePersistableRecord {
+    /// Non-mutating version of the optional didInsert(_:)
+    func didInsert(_ inserted: InsertionSuccess)
+}
+```
+
+Yes, three protocols instead of one. Here is how you pick one or the other:
+
+- **If your type is a class**, choose `PersistableRecord`. On top of that, implement `didInsert(_:)` if the database table has an auto-incremented primary key.
+
+- **If your type is a struct, and the database table has an auto-incremented primary key**, choose `MutablePersistableRecord`, and implement `didInsert(_:)`.
+
+- **Otherwise**, choose `PersistableRecord`, and ignore `didInsert(_:)`.
+
+The `encode(to:)` method defines which [values](#values) (Bool, Int, String, Date, Swift enums, etc.) are assigned to database columns.
+
+The optional `didInsert` method lets the adopting type store its rowID after successful insertion, and is only useful for tables that have an auto-incremented primary key. It is called from a protected dispatch queue, and serialized with all database updates.
+
+**To use the persistable protocols**, subclass the [Record](#record-class) class, or adopt one of them explicitly. For example:
+
+```swift
+extension Place : MutablePersistableRecord {
+    /// The values persisted in the database
+    func encode(to container: inout PersistenceContainer) {
+        container["id"] = id
+        container["title"] = title
+        container["latitude"] = coordinate.latitude
+        container["longitude"] = coordinate.longitude
+    }
+    
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+
+var paris = Place(
+    id: nil,
+    title: "Paris",
+    coordinate: CLLocationCoordinate2D(latitude: 48.8534100, longitude: 2.3488000))
+
+try paris.insert(db)
+paris.id // some value
+```
+
+Persistence containers also accept column enums:
+
+```swift
+extension Place : MutablePersistableRecord {
+    enum Columns: String, ColumnExpression {
+        case id, title, latitude, longitude
+    }
+    
+    func encode(to container: inout PersistenceContainer) {
+        container[Columns.id] = id
+        container[Columns.title] = title
+        container[Columns.latitude] = coordinate.latitude
+        container[Columns.longitude] = coordinate.longitude
+    }
+}
+```
+
+When your record type adopts the standard Encodable protocol, you don't have to provide the implementation for `encode(to:)`. See [Codable Records] for more information:
+
+```swift
+// That's all
+struct Player: Encodable, MutablePersistableRecord {
+    var id: Int64?
+    var name: String
+    var score: Int
+    
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+```
+
+
+### Persistence Methods
+
+[Record](#record-class) subclasses and types that adopt [PersistableRecord] are given methods that insert, update, and delete:
+
+```swift
+// INSERT
+try place.insert(db)
+let insertedPlace = try place.inserted(db) // non-mutating
+
+// UPDATE
+try place.update(db)
+try place.update(db, columns: ["title"])
+
+// Maybe UPDATE
+try place.updateChanges(db, from: otherPlace)
+try place.updateChanges(db) { $0.isFavorite = true }
+try place.updateChanges(db) // Record class only
+
+// INSERT or UPDATE
+try place.save(db)
+let savedPlace = try place.saved(db) // non-mutating
+
+// UPSERT
+try place.upsert(db)
+let insertedPlace = try place.upsertAndFetch(db)
+
+// DELETE
+try place.delete(db)
+
+// EXISTENCE CHECK
+let exists = try place.exists(db)
+```
+
+See [Upsert](#upsert) below for more information about upserts.
+
+**The [TableRecord] protocol comes with batch operations**:
+
+```swift
+// UPDATE
+try Place.updateAll(db, ...)
+
+// DELETE
+try Place.deleteAll(db)
+try Place.deleteAll(db, ids:...)
+try Place.deleteAll(db, keys:...)
+try Place.deleteOne(db, id:...)
+try Place.deleteOne(db, key:...)
+```
+
+For more information about batch updates, see [Update Requests](#update-requests).
+
+- All persistence methods can throw a [DatabaseError](#error-handling).
+
+- `update` and `updateChanges` throw [RecordError] if the database does not contain any row for the primary key of the record.
+
+- `save` makes sure your values are stored in the database. It performs an UPDATE if the record has a non-null primary key, and then, if no row was modified, an INSERT.
It directly performs an INSERT if the record has no primary key, or a null primary key.
+
+- `delete` and `deleteOne` return whether a database row was deleted or not. `deleteAll` returns the number of deleted rows. `updateAll` returns the number of updated rows. `updateChanges` returns whether a database row was updated or not.
+
+**All primary keys are supported**, including composite primary keys that span several columns, and the [hidden `rowid` column](https://www.sqlite.org/rowidtable.html).
+
+**To customize persistence methods**, you provide [Persistence Callbacks], described below. Do not attempt to override the ready-made persistence methods.
+
+### Upsert
+
+[UPSERT](https://www.sqlite.org/lang_UPSERT.html) is an SQLite feature that causes an INSERT to behave as an UPDATE or a no-op if the INSERT would violate a uniqueness constraint (primary key or unique index).
+
+> **Note**: Upsert APIs are available from SQLite 3.35.0+: iOS 15.0+, macOS 12.0+, tvOS 15.0+, watchOS 8.0+, or with a [custom SQLite build] or [SQLCipher](#encryption).
+>
+> **Note**: With regard to [persistence callbacks](#available-callbacks), an upsert behaves exactly like an insert. In particular: the `aroundInsert(_:)` and `didInsert(_:)` callbacks report the rowid of the inserted or updated row; `willUpdate`, `aroundUpdate`, `didUpdate` are not called.
+
+[PersistableRecord] provides three upsert methods:
+
+- `upsert(_:)`
+
+    Inserts or updates a record.
+
+    The upsert behavior is triggered by a violation of any uniqueness constraint on the table (primary key or unique index). In case of conflict, all columns but the primary key are overwritten with the inserted values:
+
+    ```swift
+    struct Player: Encodable, PersistableRecord {
+        var id: Int64
+        var name: String
+        var score: Int
+    }
+
+    // INSERT INTO player (id, name, score)
+    // VALUES (1, 'Arthur', 1000)
+    // ON CONFLICT DO UPDATE SET
+    //   name = excluded.name,
+    //   score = excluded.score
+    let player = Player(id: 1, name: "Arthur", score: 1000)
+    try player.upsert(db)
+    ```
+
+- `upsertAndFetch(_:onConflict:doUpdate:)` (requires [FetchableRecord] conformance)
+
+    Inserts or updates a record, and returns the upserted record.
+
+    The `onConflict` and `doUpdate` arguments let you further control the upsert behavior. Make sure you check the [SQLite UPSERT documentation](https://www.sqlite.org/lang_UPSERT.html) for detailed information.
+
+    - `onConflict`: the "conflict target" is the array of columns in the uniqueness constraint (primary key or unique index) that triggers the upsert.
+
+        If empty (the default), all uniqueness constraints are considered.
+
+    - `doUpdate`: a closure that returns column assignments to perform in case of conflict. Other columns are overwritten with the inserted values.
+
+        By default, all inserted columns but the primary key and the conflict target are overwritten.
+
+    In the example below, we upsert the new vocabulary word "jovial". It is inserted if that word is not already in the dictionary.
Otherwise, `count` is incremented, `isTainted` is not overwritten, and `kind` is overwritten:
+
+    ```swift
+    // CREATE TABLE vocabulary(
+    //   word TEXT NOT NULL PRIMARY KEY,
+    //   kind TEXT NOT NULL,
+    //   isTainted BOOLEAN DEFAULT 0,
+    //   count INT DEFAULT 1)
+    struct Vocabulary: Encodable, PersistableRecord {
+        var word: String
+        var kind: String
+        var isTainted: Bool
+    }
+
+    // INSERT INTO vocabulary(word, kind, isTainted)
+    // VALUES('jovial', 'adjective', 0)
+    // ON CONFLICT(word) DO UPDATE SET
+    //   count = count + 1,   -- on conflict, count is incremented
+    //   kind = excluded.kind -- on conflict, kind is overwritten
+    // RETURNING *
+    let vocabulary = Vocabulary(word: "jovial", kind: "adjective", isTainted: false)
+    let upserted = try vocabulary.upsertAndFetch(
+        db, onConflict: ["word"],
+        doUpdate: { _ in
+            [Column("count") += 1,            // on conflict, count is incremented
+             Column("isTainted").noOverwrite] // on conflict, isTainted is NOT overwritten
+        })
+    ```
+
+    The `doUpdate` closure accepts an `excluded` TableAlias argument that refers to the inserted values that trigger the conflict. You can use it to specify an explicit overwrite, or to perform a computation. In the next example, the upsert keeps the maximum date in case of conflict:
+
+    ```swift
+    // INSERT INTO message(id, text, date)
+    // VALUES(...)
+    // ON CONFLICT DO UPDATE SET
+    //   text = excluded.text,
+    //   date = MAX(date, excluded.date)
+    // RETURNING *
+    let upserted = try message.upsertAndFetch(db, doUpdate: { excluded in
+        // keep the maximum date in case of conflict
+        [Column("date").set(to: max(Column("date"), excluded["date"]))]
+    })
+    ```
+
+- `upsertAndFetch(_:as:onConflict:doUpdate:)` (does not require [FetchableRecord] conformance)
+
+    This method is identical to `upsertAndFetch(_:onConflict:doUpdate:)` described above, but you can provide a distinct [FetchableRecord] record type as a result, in order to specify the returned columns.
+
+### Persistence Methods and the `RETURNING` clause
+
+SQLite is able to return values from an inserted, updated, or deleted row, with the [`RETURNING` clause](https://www.sqlite.org/lang_returning.html).
+
+> **Note**: Support for the `RETURNING` clause is available from SQLite 3.35.0+: iOS 15.0+, macOS 12.0+, tvOS 15.0+, watchOS 8.0+, or with a [custom SQLite build] or [SQLCipher](#encryption).
+
+The `RETURNING` clause helps deal with database features such as auto-incremented ids, default values, and [generated columns](https://sqlite.org/gencol.html). You can, for example, insert a few columns and fetch the default or generated ones in one step.
+
+GRDB uses the `RETURNING` clause in all persistence methods that contain `AndFetch` in their name.
+ +For example, given a database table with an auto-incremented primary key and a default score: + +```swift +try dbQueue.write { db in + try db.execute(sql: """ + CREATE TABLE player( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT NOT NULL, + score INTEGER NOT NULL DEFAULT 1000) + """) +} +``` + +You can define a record type with full database information, and another partial record type that deals with a subset of columns: + +```swift +// A player with full database information +struct Player: Codable, PersistableRecord, FetchableRecord { + var id: Int64 + var name: String + var score: Int +} + +// A partial player +struct PartialPlayer: Encodable, PersistableRecord { + static let databaseTableName = "player" + var name: String +} +``` + +And now you can get a full player by inserting a partial one: + +```swift +try dbQueue.write { db in + let partialPlayer = PartialPlayer(name: "Alice") + + // INSERT INTO player (name) VALUES ('Alice') RETURNING * + if let player = try partialPlayer.insertAndFetch(db, as: Player.self) { + print(player.id) // The inserted id + print(player.name) // The inserted name + print(player.score) // The default score + } +} +``` + +For extra precision, you can select only the columns you need, and fetch the desired value from the provided prepared [`Statement`]: + +```swift +try dbQueue.write { db in + let partialPlayer = PartialPlayer(name: "Alice") + + // INSERT INTO player (name) VALUES ('Alice') RETURNING score + let score = try partialPlayer.insertAndFetch(db, selection: [Column("score")]) { statement in + try Int.fetchOne(statement) + } + print(score) // Prints 1000, the default score +} +``` + +There are other similar persistence methods, such as `upsertAndFetch`, `saveAndFetch`, `updateAndFetch`, `updateChangesAndFetch`, etc. They all behave like `upsert`, `save`, `update`, `updateChanges`, except that they return saved values. For example: + +```swift +// Save and return the saved player +let savedPlayer = try player.saveAndFetch(db) +``` + +See [Persistence Methods], [Upsert](#upsert), and [`updateChanges` methods](#the-updatechanges-methods) for more information. + +**Batch operations** can return updated or deleted values: + +> **Warning**: Make sure you check the [documentation of the `RETURNING` clause](https://www.sqlite.org/lang_returning.html), which describes important limitations and caveats for batch operations. + +```swift +let request = Player.filter(...)... + +// Fetch all deleted players +// DELETE FROM player RETURNING * +let deletedPlayers = try request.deleteAndFetchAll(db) // [Player] + +// Fetch a selection of columns from the deleted rows +// DELETE FROM player RETURNING name +let statement = try request.deleteAndFetchStatement(db, selection: [Column("name")]) +let deletedNames = try String.fetchSet(statement) + +// Fetch all updated players +// UPDATE player SET score = score + 10 RETURNING * +let updatedPlayers = try request.updateAndFetchAll(db, [Column("score") += 10]) // [Player] + +// Fetch a selection of columns from the updated rows +// UPDATE player SET score = score + 10 RETURNING score +let statement = try request.updateAndFetchStatement( + db, [Column("score") += 10], + select: [Column("score")]) +let updatedScores = try Int.fetchAll(statement) +``` + + +### Persistence Callbacks + +Your custom type may want to perform extra work when the persistence methods are invoked. + +To this end, your record type can implement **persistence callbacks**. 
Callbacks are methods that get called at certain moments of a record's life cycle. With callbacks it is possible to write code that will run whenever a record is inserted, updated, or deleted.
+
+In order to use a callback method, you need to provide its implementation. For example, a frequently used callback is `didInsert`, in the case of auto-incremented database ids:
+
+```swift
+struct Player: MutablePersistableRecord {
+    var id: Int64?
+    
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+
+try dbQueue.write { db in
+    var player = Player(id: nil, ...)
+    try player.insert(db)
+    print(player.id) // didInsert was called: prints some non-nil id
+}
+```
+
+When you subclass the [Record](#record-class) class, override the callback, and make sure you call `super` at some point of your implementation:
+
+```swift
+class Player: Record {
+    var id: Int64?
+    
+    // Update auto-incremented id upon successful insertion
+    override func didInsert(_ inserted: InsertionSuccess) {
+        super.didInsert(inserted)
+        id = inserted.rowID
+    }
+}
+```
+
+Callbacks can also help implement record validation:
+
+```swift
+struct Link: PersistableRecord {
+    var url: URL
+    
+    func willSave(_ db: Database) throws {
+        if url.host == nil {
+            throw ValidationError("url must be absolute.")
+        }
+    }
+}
+
+try link.insert(db) // Calls the willSave callback
+try link.update(db) // Calls the willSave callback
+try link.save(db)   // Calls the willSave callback
+try link.upsert(db) // Calls the willSave callback
+```
+
+#### Available Callbacks
+
+Here is a list with all the available [persistence callbacks], listed in the same order in which they will get called during the respective operations:
+
+- Inserting a record (all `record.insert` and `record.upsert` methods)
+    - `willSave`
+    - `aroundSave`
+    - `willInsert`
+    - `aroundInsert`
+    - `didInsert`
+    - `didSave`
+
+- Updating a record (all `record.update` methods)
+    - `willSave`
+    - `aroundSave`
+    - `willUpdate`
+    - `aroundUpdate`
+    - `didUpdate`
+    - `didSave`
+
+- Deleting a record (only the `record.delete(_:)` method)
+    - `willDelete`
+    - `aroundDelete`
+    - `didDelete`
+
+For detailed information about each callback, check the [reference](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/mutablepersistablerecord/).
+
+In the `MutablePersistableRecord` protocol, `willInsert` and `didInsert` are mutating methods. In `PersistableRecord`, they are not mutating.
+
+> **Note**: The `record.save(_:)` method performs an UPDATE if the record has a non-null primary key, and then, if no row was modified, an INSERT. It directly performs an INSERT if the record has no primary key, or a null primary key. It triggers update and/or insert callbacks accordingly.
+>
+> **Warning**: Callbacks are only invoked from persistence methods called on record instances. Callbacks are not invoked when you call a type method, perform a batch operation, or execute raw SQL.
+>
+> **Warning**: When a `did***` callback is invoked, do not assume that the change is actually persisted on disk, because the database may still be inside an uncommitted transaction. When you need to handle transaction completions, use the [afterNextTransaction(onCommit:onRollback:)](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/afternexttransaction(oncommit:onrollback:)) method.
For example:
+>
+> ```swift
+> struct PictureFile: PersistableRecord {
+>     var path: String
+>
+>     func willDelete(_ db: Database) {
+>         db.afterNextTransaction { _ in
+>             try? deleteFileOnDisk()
+>         }
+>     }
+> }
+> ```
+
+
+## Identifiable Records
+
+**When a record type maps a table with a single-column primary key, it is recommended to have it adopt the standard [Identifiable] protocol.**
+
+```swift
+struct Player: Identifiable, FetchableRecord, PersistableRecord {
+    var id: Int64 // fulfills the Identifiable requirement
+    var name: String
+    var score: Int
+}
+```
+
+When `id` has a [database-compatible type](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible) (Int64, Int, String, UUID, ...), the `Identifiable` conformance unlocks type-safe record and request methods:
+
+```swift
+let player = try Player.find(db, id: 1)               // Player
+let player = try Player.fetchOne(db, id: 1)           // Player?
+let players = try Player.fetchAll(db, ids: [1, 2, 3]) // [Player]
+let players = try Player.fetchSet(db, ids: [1, 2, 3]) // Set<Player>
+
+let request = Player.filter(id: 1)
+let request = Player.filter(ids: [1, 2, 3])
+
+try Player.deleteOne(db, id: 1)
+try Player.deleteAll(db, ids: [1, 2, 3])
+```
+
+> **Note**: `Identifiable` is not available on all application targets, and not all tables have a single-column primary key. GRDB provides other methods that deal with primary and unique keys, but they won't check the type of their arguments:
+>
+> ```swift
+> // Available on non-Identifiable types
+> try Player.fetchOne(db, key: 1)
+> try Player.fetchOne(db, key: ["email": "arthur@example.com"])
+> try Country.fetchAll(db, keys: ["FR", "US"])
+> try Citizenship.fetchOne(db, key: ["citizenId": 1, "countryCode": "FR"])
+>
+> let request = Player.filter(key: 1)
+> let request = Player.filter(keys: [1, 2, 3])
+>
+> try Player.deleteOne(db, key: 1)
+> try Player.deleteAll(db, keys: [1, 2, 3])
+> ```
+
+> **Note**: It is not recommended to use `Identifiable` on record types that use an auto-incremented primary key:
+>
+> ```swift
+> // AVOID declaring Identifiable conformance when key is auto-incremented
+> struct Player {
+>     var id: Int64? // Not an id suitable for Identifiable
+>     var name: String
+>     var score: Int
+> }
+>
+> extension Player: FetchableRecord, MutablePersistableRecord {
+>     // Update auto-incremented id upon successful insertion
+>     mutating func didInsert(_ inserted: InsertionSuccess) {
+>         id = inserted.rowID
+>     }
+> }
+> ```
+>
+> For a detailed rationale, please see [issue #1435](https://github.com/groue/GRDB.swift/issues/1435#issuecomment-1740857712).
+
+Some database tables have a single-column primary key which is not called "id":
+
+```swift
+try db.create(table: "country") { t in
+    t.primaryKey("isoCode", .text)
+    t.column("name", .text).notNull()
+    t.column("population", .integer).notNull()
+}
+```
+
+In this case, `Identifiable` conformance can be achieved, for example, by returning the primary key value from the `id` property:
+
+```swift
+struct Country: Identifiable, FetchableRecord, PersistableRecord {
+    var isoCode: String
+    var name: String
+    var population: Int
+    
+    // Fulfill the Identifiable requirement
+    var id: String { isoCode }
+}
+
+let france = try dbQueue.read { db in
+    try Country.fetchOne(db, id: "FR")
+}
+```
+
+
+## Codable Records
+
+Record types that adopt an archival protocol ([Codable, Encodable or Decodable](https://developer.apple.com/documentation/foundation/archives_and_serialization/encoding_and_decoding_custom_types)) get free database support just by declaring conformance to the desired [record protocols](#record-protocols-overview):
+
+```swift
+// Declare a record...
+struct Player: Codable, FetchableRecord, PersistableRecord {
+    var name: String
+    var score: Int
+}
+
+// ...and there you go:
+try dbQueue.write { db in
+    try Player(name: "Arthur", score: 100).insert(db)
+    let players = try Player.fetchAll(db)
+}
+```
+
+Codable records encode and decode their properties according to their own implementation of the Encodable and Decodable protocols. Yet databases have specific requirements:
+
+- Properties are always coded according to their preferred database representation, when they have one (all [values](#values) that adopt the [`DatabaseValueConvertible`] protocol).
+- You can customize the encoding and decoding of dates and uuids.
+- Complex properties (arrays, dictionaries, nested structs, etc.) are stored as JSON.
+
+For more information about Codable records, see:
+
+- [JSON Columns]
+- [Column Names Coding Strategies]
+- [Data, Date, and UUID Coding Strategies]
+- [The userInfo Dictionary]
+- [Tip: Derive Columns from Coding Keys](#tip-derive-columns-from-coding-keys)
+
+> :bulb: **Tip**: see the [Demo Applications] for sample code that uses Codable records.
+
+
+### JSON Columns
+
+When a [Codable record](#codable-records) contains a property that is not a simple [value](#values) (Bool, Int, String, Date, Swift enums, etc.), that value is encoded and decoded as a **JSON string**. For example:
+
+```swift
+enum AchievementColor: String, Codable {
+    case bronze, silver, gold
+}
+
+struct Achievement: Codable {
+    var name: String
+    var color: AchievementColor
+}
+
+struct Player: Codable, FetchableRecord, PersistableRecord {
+    var name: String
+    var score: Int
+    var achievements: [Achievement] // stored in a JSON column
+}
+
+try dbQueue.write { db in
+    // INSERT INTO player (name, score, achievements)
+    // VALUES (
+    //   'Arthur',
+    //   100,
+    //   '[{"color":"gold","name":"Use Codable Records"}]')
+    let achievement = Achievement(name: "Use Codable Records", color: .gold)
+    let player = Player(name: "Arthur", score: 100, achievements: [achievement])
+    try player.insert(db)
+}
+```
+
+GRDB uses the standard [JSONDecoder](https://developer.apple.com/documentation/foundation/jsondecoder) and [JSONEncoder](https://developer.apple.com/documentation/foundation/jsonencoder) from Foundation. By default, Data values are handled with the `.base64` strategy, Date with the `.millisecondsSince1970` strategy, and non-conforming floats with the `.throw` strategy.
+
+You can customize the JSON format by implementing those methods:
+
+```swift
+protocol FetchableRecord {
+    static func databaseJSONDecoder(for column: String) -> JSONDecoder
+}
+
+protocol EncodableRecord {
+    static func databaseJSONEncoder(for column: String) -> JSONEncoder
+}
+```
+
+> :bulb: **Tip**: Make sure you set the JSONEncoder `sortedKeys` option. This option makes sure that the JSON output is stable. This stability is required for [Record Comparison] to work as expected, and for database observation tools such as [ValueObservation] to accurately recognize changed records.
+
+
+### Column Names Coding Strategies
+
+By default, [Codable Records] store their values into database columns that match their coding keys: the `teamID` property is stored into the `teamID` column.
+
+This behavior can be overridden, so that you can, for example, store the `teamID` property into the `team_id` column:
+
+```swift
+protocol FetchableRecord {
+    static var databaseColumnDecodingStrategy: DatabaseColumnDecodingStrategy { get }
+}
+
+protocol EncodableRecord {
+    static var databaseColumnEncodingStrategy: DatabaseColumnEncodingStrategy { get }
+}
+```
+
+See [DatabaseColumnDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasecolumndecodingstrategy) and [DatabaseColumnEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasecolumnencodingstrategy/) to learn about all available strategies.
+
+
+### Data, Date, and UUID Coding Strategies
+
+By default, [Codable Records] encode and decode their Data properties as blobs, and Date and UUID properties as described in the general [Date and DateComponents](#date-and-datecomponents) and [UUID](#uuid) chapters.
+
+To sum up: dates encode themselves in the "YYYY-MM-DD HH:MM:SS.SSS" format, in the UTC time zone, and decode a variety of date formats and timestamps. UUIDs encode themselves as 16-byte data blobs, and decode both 16-byte data blobs and strings such as "E621E1F8-C36C-495A-93FC-0C247A3E6E5F".
+
+Those behaviors can be overridden:
+
+```swift
+protocol FetchableRecord {
+    static var databaseDataDecodingStrategy: DatabaseDataDecodingStrategy { get }
+    static var databaseDateDecodingStrategy: DatabaseDateDecodingStrategy { get }
+}
+
+protocol EncodableRecord {
+    static var databaseDataEncodingStrategy: DatabaseDataEncodingStrategy { get }
+    static var databaseDateEncodingStrategy: DatabaseDateEncodingStrategy { get }
+    static var databaseUUIDEncodingStrategy: DatabaseUUIDEncodingStrategy { get }
+}
+```
+
+See [DatabaseDataDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedatadecodingstrategy/), [DatabaseDateDecodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedatedecodingstrategy/), [DatabaseDataEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedataencodingstrategy/), [DatabaseDateEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasedateencodingstrategy/), and [DatabaseUUIDEncodingStrategy](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseuuidencodingstrategy/) to learn about all available strategies.
+
+There is no customization of uuid decoding, because UUID can already decode all its encoded variants (16-byte blobs and uuid strings, both uppercase and lowercase).
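+
+For example, here is a sketch of a record that opts into ISO-8601 strings for its dates (the `Player` type and its `createdAt` column are hypothetical, for illustration only):
+
+```swift
+struct Player: Codable, FetchableRecord, PersistableRecord {
+    // Encode dates as ISO-8601 strings such as "2023-01-01T10:00:00Z"...
+    static let databaseDateEncodingStrategy = DatabaseDateEncodingStrategy.iso8601
+    // ... and decode them back with the same format.
+    static let databaseDateDecodingStrategy = DatabaseDateDecodingStrategy.iso8601
+
+    var id: Int64
+    var name: String
+    var createdAt: Date
+}
+```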
+
+Customized coding strategies apply:
+
+- When encoding and decoding database rows to and from records (fetching and persistence methods).
+- In requests by single-column primary key: `fetchOne(_:id:)`, `filter(id:)`, `deleteAll(_:keys:)`, etc.
+
+*They do not apply* in other requests based on data, date, or uuid values.
+
+So make sure that those values are properly encoded in your requests. For example:
+
+```swift
+struct Player: Codable, FetchableRecord, PersistableRecord, Identifiable {
+    // UUIDs are stored as strings
+    static let databaseUUIDEncodingStrategy = DatabaseUUIDEncodingStrategy.uppercaseString
+    var id: UUID
+    ...
+}
+
+try dbQueue.write { db in
+    let uuid = UUID()
+    let player = Player(id: uuid, ...)
+    
+    // OK: inserts a player in the database, with a string uuid
+    try player.insert(db)
+    
+    // OK: performs a string-based query, finds the inserted player
+    _ = try Player.filter(id: uuid).fetchOne(db)
+    
+    // NOT OK: performs a blob-based query, fails to find the inserted player
+    _ = try Player.filter(Column("id") == uuid).fetchOne(db)
+    
+    // OK: performs a string-based query, finds the inserted player
+    _ = try Player.filter(Column("id") == uuid.uuidString).fetchOne(db)
+}
+```
+
+
+### The userInfo Dictionary
+
+Your [Codable Records] can be stored in the database, but they may also have other purposes. In this case, you may need to customize their implementations of `Decodable.init(from:)` and `Encodable.encode(to:)`, depending on the context.
+
+The standard way to provide such context is the `userInfo` dictionary. Implement those properties:
+
+```swift
+protocol FetchableRecord {
+    static var databaseDecodingUserInfo: [CodingUserInfoKey: Any] { get }
+}
+
+protocol EncodableRecord {
+    static var databaseEncodingUserInfo: [CodingUserInfoKey: Any] { get }
+}
+```
+
+For example, here is a Player type that customizes its decoding:
+
+```swift
+// A key that holds a decoder's name
+let decoderName = CodingUserInfoKey(rawValue: "decoderName")!
+
+struct Player: Decodable {
+    init(from decoder: Decoder) throws {
+        // Print the decoder name
+        let decoderName = decoder.userInfo[decoderName] as? String
+        print("Decoded from \(decoderName ?? "unknown decoder")")
+        ...
+    }
+}
+```
+
+You can have a specific decoding from JSON...
+
+```swift
+// prints "Decoded from JSON"
+let decoder = JSONDecoder()
+decoder.userInfo = [decoderName: "JSON"]
+let player = try decoder.decode(Player.self, from: jsonData)
+```
+
+... and another one from database rows:
+
+```swift
+extension Player: FetchableRecord {
+    static let databaseDecodingUserInfo: [CodingUserInfoKey: Any] = [decoderName: "database row"]
+}
+
+// prints "Decoded from database row"
+let player = try Player.fetchOne(db, ...)
+```
+
+> **Note**: make sure the `databaseDecodingUserInfo` and `databaseEncodingUserInfo` properties are explicitly declared as `[CodingUserInfoKey: Any]`. If they are not, the Swift compiler may silently miss the protocol requirement, resulting in sticky empty userInfo.
+
+
+### Tip: Derive Columns from Coding Keys
+
+Codable types come with a [CodingKeys](https://developer.apple.com/documentation/foundation/archives_and_serialization/encoding_and_decoding_custom_types) enum.
You can use them to safely define database columns:
+
+```swift
+struct Player: Codable {
+    var id: Int64
+    var name: String
+    var score: Int
+}
+
+extension Player: FetchableRecord, PersistableRecord {
+    enum Columns {
+        static let id = Column(CodingKeys.id)
+        static let name = Column(CodingKeys.name)
+        static let score = Column(CodingKeys.score)
+    }
+}
+```
+
+See the [query interface](#the-query-interface) and [Recommended Practices for Designing Record Types](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordrecommendedpractices) for further information.
+
+
+## Record Class
+
+**Record** is a class that is designed to be subclassed. It inherits its features from the [FetchableRecord, TableRecord, and PersistableRecord](#record-protocols-overview) protocols. On top of that, Record instances can compare against previous versions of themselves in order to [avoid useless updates](#record-comparison).
+
+Record subclasses define their relationship with the database by overriding database methods. For example:
+
+```swift
+class Place: Record {
+    var id: Int64?
+    var title: String
+    var isFavorite: Bool
+    var coordinate: CLLocationCoordinate2D
+    
+    init(id: Int64?, title: String, isFavorite: Bool, coordinate: CLLocationCoordinate2D) {
+        self.id = id
+        self.title = title
+        self.isFavorite = isFavorite
+        self.coordinate = coordinate
+        super.init()
+    }
+    
+    /// The table name
+    override class var databaseTableName: String { "place" }
+    
+    /// The table columns
+    enum Columns: String, ColumnExpression {
+        case id, title, favorite, latitude, longitude
+    }
+    
+    /// Creates a record from a database row
+    required init(row: Row) throws {
+        id = row[Columns.id]
+        title = row[Columns.title]
+        isFavorite = row[Columns.favorite]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row[Columns.latitude],
+            longitude: row[Columns.longitude])
+        try super.init(row: row)
+    }
+    
+    /// The values persisted in the database
+    override func encode(to container: inout PersistenceContainer) throws {
+        container[Columns.id] = id
+        container[Columns.title] = title
+        container[Columns.favorite] = isFavorite
+        container[Columns.latitude] = coordinate.latitude
+        container[Columns.longitude] = coordinate.longitude
+    }
+    
+    /// Update record ID after a successful insertion
+    override func didInsert(_ inserted: InsertionSuccess) {
+        super.didInsert(inserted)
+        id = inserted.rowID
+    }
+}
+```
+
+
+## Record Comparison
+
+**Records that adopt the [EncodableRecord] protocol can compare against other records, or against previous versions of themselves.**
+
+This helps avoid costly UPDATE statements when a record has not been edited.
+
+- [The `updateChanges` Methods](#the-updatechanges-methods)
+- [The `databaseEquals` Method](#the-databaseequals-method)
+- [The `databaseChanges` and `hasDatabaseChanges` Methods](#the-databasechanges-and-hasdatabasechanges-methods)
+
+
+### The `updateChanges` Methods
+
+The `updateChanges` methods perform a database update of the changed columns only (and do nothing if the record has no changes).
+ +- `updateChanges(_:from:)` + + This method lets you compare two records: + + ```swift + if let oldPlayer = try Player.fetchOne(db, id: 42) { + var newPlayer = oldPlayer + newPlayer.score = 100 + if try newPlayer.updateChanges(db, from: oldPlayer) { + print("player was modified, and updated in the database") + } else { + print("player was not modified, and database was not hit") + } + } + ``` + +- `updateChanges(_:modify:)` + + This method lets you update a record in place: + + ```swift + if var player = try Player.fetchOne(db, id: 42) { + let modified = try player.updateChanges(db) { + $0.score = 100 + } + if modified { + print("player was modified, and updated in the database") + } else { + print("player was not modified, and database was not hit") + } + } + ``` + +- `updateChanges(_:)` (Record class only) + + Instances of the [Record](#record-class) class are able to compare against themselves, and know if they have changes that have not been saved since the last fetch or saving: + + ```swift + // Record class only + if let player = try Player.fetchOne(db, id: 42) { + player.score = 100 + if try player.updateChanges(db) { + print("player was modified, and updated in the database") + } else { + print("player was not modified, and database was not hit") + } + } + ``` + + +### The `databaseEquals` Method + +This method returns whether two records have the same database representation: + +```swift +let oldPlayer: Player = ... +var newPlayer: Player = ... +if newPlayer.databaseEquals(oldPlayer) == false { + try newPlayer.save(db) +} +``` + +> **Note**: The comparison is performed on the database representation of records. As long as your record type adopts the EncodableRecord protocol, you don't need to care about Equatable. + + +### The `databaseChanges` and `hasDatabaseChanges` Methods + +`databaseChanges(from:)` returns a dictionary of differences between two records: + +```swift +let oldPlayer = Player(id: 1, name: "Arthur", score: 100) +let newPlayer = Player(id: 1, name: "Arthur", score: 1000) +for (column, oldValue) in try newPlayer.databaseChanges(from: oldPlayer) { + print("\(column) was \(oldValue)") +} +// prints "score was 100" +``` + +The [Record](#record-class) class is able to compare against itself: + +```swift +// Record class only +let player = Player(id: 1, name: "Arthur", score: 100) +try player.insert(db) +player.score = 1000 +for (column, oldValue) in try player.databaseChanges { + print("\(column) was \(oldValue)") +} +// prints "score was 100" +``` + +[Record](#record-class) instances also have a `hasDatabaseChanges` property: + +```swift +// Record class only +player.score = 1000 +if player.hasDatabaseChanges { + try player.save(db) +} +``` + +`Record.hasDatabaseChanges` is false after a Record instance has been fetched or saved into the database. Subsequent modifications may set it, or not: `hasDatabaseChanges` is based on value comparison. **Setting a property to the same value does not set the changed flag**: + +```swift +let player = Player(name: "Barbara", score: 750) +player.hasDatabaseChanges // true + +try player.insert(db) +player.hasDatabaseChanges // false + +player.name = "Barbara" +player.hasDatabaseChanges // false + +player.score = 1000 +player.hasDatabaseChanges // true +try player.databaseChanges // ["score": 750] +``` + +For an efficient algorithm which synchronizes the content of a database table with a JSON payload, check [groue/SortedDifference](https://github.com/groue/SortedDifference). 
+
+
+## Record Customization Options
+
+GRDB records come with many default behaviors that are designed to fit most situations. Many of those defaults can be customized for your specific needs:
+
+- [Persistence Callbacks]: define what happens when you call a persistence method such as `player.insert(db)`.
+- [Conflict Resolution]: Run `INSERT OR REPLACE` queries, and generally define what happens when a persistence method violates a unique index.
+- [Columns Selected by a Request]: define which columns are selected by requests such as `Player.fetchAll(db)`.
+- [Beyond FetchableRecord]: the FetchableRecord protocol is not the end of the story.
+
+[Codable Records] have a few extra options:
+
+- [JSON Columns]: control the format of JSON columns.
+- [Column Names Coding Strategies]: control how coding keys are turned into column names.
+- [Data, Date, and UUID Coding Strategies]: control the format of Data, Date, and UUID properties in your Codable records.
+- [The userInfo Dictionary]: adapt your Codable implementation for the database.
+
+
+### Conflict Resolution
+
+**Insertions and updates can create conflicts**: for example, a query may attempt to insert a duplicate row that violates a unique index.
+
+Those conflicts normally end with an error. Yet SQLite lets you alter the default behavior, and handle conflicts with specific policies. For example, the `INSERT OR REPLACE` statement handles conflicts with the "replace" policy which replaces the conflicting row instead of throwing an error.
+
+The [five different policies](https://www.sqlite.org/lang_conflict.html) are: abort (the default), replace, rollback, fail, and ignore.
+
+**SQLite lets you specify conflict policies in two different places:**
+
+- In the definition of the database table:
+
+    ```swift
+    // CREATE TABLE player (
+    //     id INTEGER PRIMARY KEY AUTOINCREMENT,
+    //     email TEXT UNIQUE ON CONFLICT REPLACE
+    // )
+    try db.create(table: "player") { t in
+        t.autoIncrementedPrimaryKey("id")
+        t.column("email", .text).unique(onConflict: .replace) // <--
+    }
+
+    // Despite the unique index on email, both inserts succeed.
+    // The second insert replaces the first row:
+    try db.execute(sql: "INSERT INTO player (email) VALUES (?)", arguments: ["arthur@example.com"])
+    try db.execute(sql: "INSERT INTO player (email) VALUES (?)", arguments: ["arthur@example.com"])
+    ```
+
+- In each modification query:
+
+    ```swift
+    // CREATE TABLE player (
+    //     id INTEGER PRIMARY KEY AUTOINCREMENT,
+    //     email TEXT UNIQUE
+    // )
+    try db.create(table: "player") { t in
+        t.autoIncrementedPrimaryKey("id")
+        t.column("email", .text).unique()
+    }
+
+    // Again, despite the unique index on email, both inserts succeed.
+    try db.execute(sql: "INSERT OR REPLACE INTO player (email) VALUES (?)", arguments: ["arthur@example.com"])
+    try db.execute(sql: "INSERT OR REPLACE INTO player (email) VALUES (?)", arguments: ["arthur@example.com"])
+    ```
+
+When you want to handle conflicts at the query level, specify a custom `persistenceConflictPolicy` in your type that adopts the PersistableRecord protocol. It will alter the INSERT and UPDATE queries run by the `insert`, `update` and `save` [persistence methods]:
+
+```swift
+protocol MutablePersistableRecord {
+    /// The policy that handles SQLite conflicts when records are
+    /// inserted or updated.
+    ///
+    /// This property is optional: its default value uses the ABORT
+    /// policy for both insertions and updates, so that GRDB generates
+    /// regular INSERT and UPDATE queries.
+    static var persistenceConflictPolicy: PersistenceConflictPolicy { get }
+}
+
+struct Player : MutablePersistableRecord {
+    static let persistenceConflictPolicy = PersistenceConflictPolicy(
+        insert: .replace,
+        update: .replace)
+}
+
+// INSERT OR REPLACE INTO player (...) VALUES (...)
+try player.insert(db)
+```
+
+> **Note**: If you specify the `ignore` policy for inserts, the [`didInsert` callback](#persistence-callbacks) will be called with some random id in case of failed insert. You can detect failed insertions with `insertAndFetch`:
+>
+> ```swift
+> // How to detect failed `INSERT OR IGNORE`:
+> // INSERT OR IGNORE INTO player ... RETURNING *
+> if let insertedPlayer = try player.insertAndFetch(db) {
+>     // Successful insertion
+> } else {
+>     // Ignored failure
+> }
+> ```
+>
+> **Note**: The `replace` policy may have to delete rows so that inserts and updates can succeed. Those deletions are not reported to [transaction observers](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactionobserver) (this might change in a future release of SQLite).
+
+### Beyond FetchableRecord
+
+**Some GRDB users eventually discover that the [FetchableRecord] protocol does not fit all situations.** Use cases that are not well handled by FetchableRecord include:
+
+- Your application needs polymorphic row decoding: it decodes some type or another, depending on the values contained in a database row.
+
+- Your application needs to decode rows with a context: each decoded value should be initialized with some extra value that does not come from the database.
+
+Since those use cases are not well handled by FetchableRecord, don't try to implement them on top of this protocol: you'll just fight the framework.
+
+
+## Examples of Record Definitions
+
+We will show below how to declare a record type for the following database table:
+
+```swift
+try dbQueue.write { db in
+    try db.create(table: "place") { t in
+        t.autoIncrementedPrimaryKey("id")
+        t.column("title", .text).notNull()
+        t.column("isFavorite", .boolean).notNull().defaults(to: false)
+        t.column("longitude", .double).notNull()
+        t.column("latitude", .double).notNull()
+    }
+}
+```
+
+Each one of the four examples below is correct. You will pick one or the other depending on your personal preferences and the requirements of your application:
+
+<details>
+  <summary>Define a Codable struct, and adopt the record protocols you need</summary>
+
+This is the shortest way to define a record type.
+
+See the [Record Protocols Overview](#record-protocols-overview), and [Codable Records] for more information.
+
+```swift
+struct Place: Codable {
+    var id: Int64?
+    var title: String
+    var isFavorite: Bool
+    private var latitude: CLLocationDegrees
+    private var longitude: CLLocationDegrees
+    
+    var coordinate: CLLocationCoordinate2D {
+        get {
+            CLLocationCoordinate2D(
+                latitude: latitude,
+                longitude: longitude)
+        }
+        set {
+            latitude = newValue.latitude
+            longitude = newValue.longitude
+        }
+    }
+}
+
+// SQL generation
+extension Place: TableRecord {
+    /// The table columns
+    enum Columns {
+        static let id = Column(CodingKeys.id)
+        static let title = Column(CodingKeys.title)
+        static let isFavorite = Column(CodingKeys.isFavorite)
+        static let latitude = Column(CodingKeys.latitude)
+        static let longitude = Column(CodingKeys.longitude)
+    }
+}
+
+// Fetching methods
+extension Place: FetchableRecord { }
+
+// Persistence methods
+extension Place: MutablePersistableRecord {
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+```
+
+</details>
+
+<details>
+  <summary>Define a plain struct, and adopt the record protocols you need</summary>
+
+See the [Record Protocols Overview](#record-protocols-overview) for more information.
+
+```swift
+struct Place {
+    var id: Int64?
+    var title: String
+    var isFavorite: Bool
+    var coordinate: CLLocationCoordinate2D
+}
+
+// SQL generation
+extension Place: TableRecord {
+    /// The table columns
+    enum Columns: String, ColumnExpression {
+        case id, title, isFavorite, latitude, longitude
+    }
+}
+
+// Fetching methods
+extension Place: FetchableRecord {
+    /// Creates a record from a database row
+    init(row: Row) {
+        id = row[Columns.id]
+        title = row[Columns.title]
+        isFavorite = row[Columns.isFavorite]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row[Columns.latitude],
+            longitude: row[Columns.longitude])
+    }
+}
+
+// Persistence methods
+extension Place: MutablePersistableRecord {
+    /// The values persisted in the database
+    func encode(to container: inout PersistenceContainer) {
+        container[Columns.id] = id
+        container[Columns.title] = title
+        container[Columns.isFavorite] = isFavorite
+        container[Columns.latitude] = coordinate.latitude
+        container[Columns.longitude] = coordinate.longitude
+    }
+    
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+```
+
+</details>
    + +
+**Define a plain struct optimized for fetching performance**
+
+This struct derives its persistence methods from the standard Encodable protocol (see [Codable Records]), but performs optimized row decoding by accessing database columns with numeric indexes.
+
+See the [Record Protocols Overview](#record-protocols-overview) for more information.
+
+```swift
+struct Place: Encodable {
+    var id: Int64?
+    var title: String
+    var isFavorite: Bool
+    private var latitude: CLLocationDegrees
+    private var longitude: CLLocationDegrees
+
+    var coordinate: CLLocationCoordinate2D {
+        get {
+            CLLocationCoordinate2D(
+                latitude: latitude,
+                longitude: longitude)
+        }
+        set {
+            latitude = newValue.latitude
+            longitude = newValue.longitude
+        }
+    }
+}
+
+// SQL generation
+extension Place: TableRecord {
+    /// The table columns
+    enum Columns {
+        static let id = Column(CodingKeys.id)
+        static let title = Column(CodingKeys.title)
+        static let isFavorite = Column(CodingKeys.isFavorite)
+        static let latitude = Column(CodingKeys.latitude)
+        static let longitude = Column(CodingKeys.longitude)
+    }
+
+    /// Arrange the selected columns and lock their order
+    static let databaseSelection: [any SQLSelectable] = [
+        Columns.id,
+        Columns.title,
+        Columns.isFavorite,
+        Columns.latitude,
+        Columns.longitude]
+}
+
+// Fetching methods
+extension Place: FetchableRecord {
+    /// Creates a record from a database row
+    init(row: Row) {
+        // For high performance, use numeric indexes that match the
+        // order of Place.databaseSelection
+        id = row[0]
+        title = row[1]
+        isFavorite = row[2]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row[3],
+            longitude: row[4])
+    }
+}
+
+// Persistence methods
+extension Place: MutablePersistableRecord {
+    // Update auto-incremented id upon successful insertion
+    mutating func didInsert(_ inserted: InsertionSuccess) {
+        id = inserted.rowID
+    }
+}
+```
+
+
+**Subclass the Record class**
+
+See the [Record class](#record-class) for more information.
+
+```swift
+class Place: Record {
+    var id: Int64?
+    var title: String
+    var isFavorite: Bool
+    var coordinate: CLLocationCoordinate2D
+
+    init(id: Int64?, title: String, isFavorite: Bool, coordinate: CLLocationCoordinate2D) {
+        self.id = id
+        self.title = title
+        self.isFavorite = isFavorite
+        self.coordinate = coordinate
+        super.init()
+    }
+
+    /// The table name
+    override class var databaseTableName: String { "place" }
+
+    /// The table columns
+    enum Columns: String, ColumnExpression {
+        case id, title, isFavorite, latitude, longitude
+    }
+
+    /// Creates a record from a database row
+    required init(row: Row) throws {
+        id = row[Columns.id]
+        title = row[Columns.title]
+        isFavorite = row[Columns.isFavorite]
+        coordinate = CLLocationCoordinate2D(
+            latitude: row[Columns.latitude],
+            longitude: row[Columns.longitude])
+        try super.init(row: row)
+    }
+
+    /// The values persisted in the database
+    override func encode(to container: inout PersistenceContainer) throws {
+        container[Columns.id] = id
+        container[Columns.title] = title
+        container[Columns.isFavorite] = isFavorite
+        container[Columns.latitude] = coordinate.latitude
+        container[Columns.longitude] = coordinate.longitude
+    }
+
+    // Update auto-incremented id upon successful insertion
+    override func didInsert(_ inserted: InsertionSuccess) {
+        super.didInsert(inserted)
+        id = inserted.rowID
+    }
+}
+```
+
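+
+Whichever definition you pick, the record type gets the same fetching and persistence methods. Here is a minimal usage sketch, assuming the plain struct definition above, the `place` table created earlier, and an `import CoreLocation` for `CLLocationCoordinate2D`:
+
+```swift
+try dbQueue.write { db in
+    // Insert a place; didInsert updates place.id
+    var place = Place(
+        id: nil,
+        title: "Eiffel Tower",
+        isFavorite: true,
+        coordinate: CLLocationCoordinate2D(latitude: 48.8584, longitude: 2.2945))
+    try place.insert(db)
+
+    // Fetch all favorite places
+    let favorites = try Place
+        .filter(Place.Columns.isFavorite == true)
+        .fetchAll(db)
+    print(favorites.count)
+}
+```
+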
+
+## List of Record Methods
+
+This is the list of record methods, along with their required protocols. The [Record](#record-class) class adopts all these protocols, and adds a few extra methods.
+
+| Method | Protocols | Notes |
+| ------ | --------- | :---: |
+| **Core Methods** | | |
+| `init(row:)` | [FetchableRecord] | |
+| `Type.databaseTableName` | [TableRecord] | |
+| `Type.databaseSelection` | [TableRecord] | [*](#columns-selected-by-a-request) |
+| `Type.persistenceConflictPolicy` | [PersistableRecord] | [*](#conflict-resolution) |
+| `record.encode(to:)` | [EncodableRecord] | |
+| **Insert and Update Records** | | |
+| `record.insert(db)` | [PersistableRecord] | |
+| `record.insertAndFetch(db)` | [PersistableRecord] & [FetchableRecord] | |
+| `record.insertAndFetch(_:as:)` | [PersistableRecord] | |
+| `record.insertAndFetch(_:selection:fetch:)` | [PersistableRecord] | |
+| `record.inserted(db)` | [PersistableRecord] | |
+| `record.save(db)` | [PersistableRecord] | |
+| `record.saveAndFetch(db)` | [PersistableRecord] & [FetchableRecord] | |
+| `record.saveAndFetch(_:as:)` | [PersistableRecord] | |
+| `record.saveAndFetch(_:selection:fetch:)` | [PersistableRecord] | |
+| `record.saved(db)` | [PersistableRecord] | |
+| `record.update(db)` | [PersistableRecord] | |
+| `record.updateAndFetch(db)` | [PersistableRecord] & [FetchableRecord] | |
+| `record.updateAndFetch(_:as:)` | [PersistableRecord] | |
+| `record.updateAndFetch(_:selection:fetch:)` | [PersistableRecord] | |
+| `record.update(db, columns:...)` | [PersistableRecord] | |
+| `record.updateAndFetch(_:columns:selection:fetch:)` | [PersistableRecord] | |
+| `record.updateChanges(db, from:...)` | [PersistableRecord] | [*](#record-comparison) |
+| `record.updateChanges(db) { ... }` | [PersistableRecord] | [*](#record-comparison) |
+| `record.updateChangesAndFetch(_:columns:as:modify:)` | [PersistableRecord] | |
+| `record.updateChangesAndFetch(_:columns:selection:fetch:modify:)` | [PersistableRecord] | |
+| `record.updateChanges(db)` | [Record](#record-class) | [*](#record-comparison) |
+| `record.upsert(db)` | [PersistableRecord] | |
+| `record.upsertAndFetch(db)` | [PersistableRecord] & [FetchableRecord] | |
+| `record.upsertAndFetch(_:as:)` | [PersistableRecord] | |
+| `Type.updateAll(db, ...)` | [TableRecord] | |
+| `Type.filter(...).updateAll(db, ...)` | [TableRecord] | ² |
+| **Delete Records** | | |
+| `record.delete(db)` | [PersistableRecord] | |
+| `Type.deleteOne(db, key:...)` | [TableRecord] | ¹ |
+| `Type.deleteOne(db, id:...)` | [TableRecord] & [Identifiable] | ¹ |
+| `Type.deleteAll(db)` | [TableRecord] | |
+| `Type.deleteAll(db, keys:...)` | [TableRecord] | ¹ |
+| `Type.deleteAll(db, ids:...)` | [TableRecord] & [Identifiable] | ¹ |
+| `Type.filter(...).deleteAll(db)` | [TableRecord] | ² |
+| **Persistence Callbacks** | | |
+| `record.willInsert(_:)` | [PersistableRecord] | |
+| `record.aroundInsert(_:insert:)` | [PersistableRecord] | |
+| `record.didInsert(_:)` | [PersistableRecord] | |
+| `record.willUpdate(_:columns:)` | [PersistableRecord] | |
+| `record.aroundUpdate(_:columns:update:)` | [PersistableRecord] | |
+| `record.didUpdate(_:)` | [PersistableRecord] | |
+| `record.willSave(_:)` | [PersistableRecord] | |
+| `record.aroundSave(_:save:)` | [PersistableRecord] | |
+| `record.didSave(_:)` | [PersistableRecord] | |
+| `record.willDelete(_:)` | [PersistableRecord] | |
+| `record.aroundDelete(_:delete:)` | [PersistableRecord] | |
+| `record.didDelete(deleted:)` | [PersistableRecord] | |
+| **Check Record Existence** | | |
+| `record.exists(db)` | [PersistableRecord] | |
+| `Type.exists(db, key: ...)` | [TableRecord] | ¹ |
+| `Type.exists(db, id: ...)` | [TableRecord] & [Identifiable] | ¹ |
+| `Type.filter(...).isEmpty(db)` | [TableRecord] | ² |
+| **Convert Record to Dictionary** | | |
+| `record.databaseDictionary` | [EncodableRecord] | |
+| **Count Records** | | |
+| `Type.fetchCount(db)` | [TableRecord] | |
+| `Type.filter(...).fetchCount(db)` | [TableRecord] | ² |
+| **Fetch Record [Cursors](#cursors)** | | |
+| `Type.fetchCursor(db)` | [FetchableRecord] & [TableRecord] | |
+| `Type.fetchCursor(db, keys:...)` | [FetchableRecord] & [TableRecord] | ¹ |
+| `Type.fetchCursor(db, ids:...)` | [FetchableRecord] & [TableRecord] & [Identifiable] | ¹ |
+| `Type.fetchCursor(db, sql: sql)` | [FetchableRecord] | ³ |
+| `Type.fetchCursor(statement)` | [FetchableRecord] | ⁴ |
+| `Type.filter(...).fetchCursor(db)` | [FetchableRecord] & [TableRecord] | ² |
+| **Fetch Record Arrays** | | |
+| `Type.fetchAll(db)` | [FetchableRecord] & [TableRecord] | |
+| `Type.fetchAll(db, keys:...)` | [FetchableRecord] & [TableRecord] | ¹ |
+| `Type.fetchAll(db, ids:...)` | [FetchableRecord] & [TableRecord] & [Identifiable] | ¹ |
+| `Type.fetchAll(db, sql: sql)` | [FetchableRecord] | ³ |
+| `Type.fetchAll(statement)` | [FetchableRecord] | ⁴ |
+| `Type.filter(...).fetchAll(db)` | [FetchableRecord] & [TableRecord] | ² |
+| **Fetch Record Sets** | | |
+| `Type.fetchSet(db)` | [FetchableRecord] & [TableRecord] | |
+| `Type.fetchSet(db, keys:...)` | [FetchableRecord] & [TableRecord] | ¹ |
+| `Type.fetchSet(db, ids:...)` | [FetchableRecord] & [TableRecord] & [Identifiable] | ¹ |
+| `Type.fetchSet(db, sql: sql)` | [FetchableRecord] | ³ |
+| `Type.fetchSet(statement)` | [FetchableRecord] | ⁴ |
+| `Type.filter(...).fetchSet(db)` | [FetchableRecord] & [TableRecord] | ² |
+| **Fetch Individual Records** | | |
+| `Type.fetchOne(db)` | [FetchableRecord] & [TableRecord] | |
+| `Type.fetchOne(db, key:...)` | [FetchableRecord] & [TableRecord] | ¹ |
+| `Type.fetchOne(db, id:...)` | [FetchableRecord] & [TableRecord] & [Identifiable] | ¹ |
+| `Type.fetchOne(db, sql: sql)` | [FetchableRecord] | ³ |
+| `Type.fetchOne(statement)` | [FetchableRecord] | ⁴ |
+| `Type.filter(...).fetchOne(db)` | [FetchableRecord] & [TableRecord] | ² |
+| `Type.find(db, key:...)` | [FetchableRecord] & [TableRecord] | ¹ |
+| `Type.find(db, id:...)` | [FetchableRecord] & [TableRecord] & [Identifiable] | ¹ |
+| **[Codable Records]** | | |
+| `Type.databaseDecodingUserInfo` | [FetchableRecord] | [*](#the-userinfo-dictionary) |
+| `Type.databaseJSONDecoder(for:)` | [FetchableRecord] | [*](#json-columns) |
+| `Type.databaseDateDecodingStrategy` | [FetchableRecord] | [*](#data-date-and-uuid-coding-strategies) |
+| `Type.databaseEncodingUserInfo` | [EncodableRecord] | [*](#the-userinfo-dictionary) |
+| `Type.databaseJSONEncoder(for:)` | [EncodableRecord] | [*](#json-columns) |
+| `Type.databaseDateEncodingStrategy` | [EncodableRecord] | [*](#data-date-and-uuid-coding-strategies) |
+| `Type.databaseUUIDEncodingStrategy` | [EncodableRecord] | [*](#data-date-and-uuid-coding-strategies) |
+| **Define [Associations]** | | |
+| `Type.belongsTo(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) |
+| `Type.hasMany(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) |
+| `Type.hasOne(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) |
+| `Type.hasManyThrough(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) |
+| `Type.hasOneThrough(...)` | [TableRecord] | [*](Documentation/AssociationsBasics.md) |
+| **Building Query Interface [Requests](#requests)** | | |
+| `record.request(for:...)` | [TableRecord] & [EncodableRecord] | [*](Documentation/AssociationsBasics.md) |
+| `Type.all()` | [TableRecord] | ² |
+| `Type.none()` | [TableRecord] | ² |
+| `Type.select(...)` | [TableRecord] | ² |
+| `Type.select(..., as:...)` | [TableRecord] | ² |
+| `Type.selectPrimaryKey(as:...)` | [TableRecord] | ² |
+| `Type.annotated(with:...)` | [TableRecord] | ² |
+| `Type.filter(...)` | [TableRecord] | ² |
+| `Type.filter(id:)` | [TableRecord] & Identifiable | [*](#identifiable-records) |
+| `Type.filter(ids:)` | [TableRecord] & Identifiable | [*](#identifiable-records) |
+| `Type.matching(...)` | [TableRecord] | ² |
+| `Type.including(all:)` | [TableRecord] | ² |
+| `Type.including(optional:)` | [TableRecord] | ² |
+| `Type.including(required:)` | [TableRecord] | ² |
+| `Type.joining(optional:)` | [TableRecord] | ² |
+| `Type.joining(required:)` | [TableRecord] | ² |
+| `Type.group(...)` | [TableRecord] | ² |
+| `Type.groupByPrimaryKey()` | [TableRecord] | ² |
+| `Type.having(...)` | [TableRecord] | ² |
+| `Type.order(...)` | [TableRecord] | ² |
+| `Type.orderByPrimaryKey()` | [TableRecord] | ² |
+| `Type.limit(...)` | [TableRecord] | ² |
+| `Type.with(...)` | [TableRecord] | ² |
+| **[Record Comparison]** | | |
+| `record.databaseEquals(...)` | [EncodableRecord] | |
+| `record.databaseChanges(from:...)` | [EncodableRecord] | |
+| `record.updateChanges(db, from:...)` | [PersistableRecord] | |
+| `record.updateChanges(db) { ... }` | [PersistableRecord] | |
+| `record.hasDatabaseChanges` | [Record](#record-class) | |
+| `record.databaseChanges` | [Record](#record-class) | |
+| `record.updateChanges(db)` | [Record](#record-class) | |
+
+¹ All unique keys are supported: primary keys (single-column, composite, [`rowid`](https://www.sqlite.org/rowidtable.html)) and unique indexes:
+
+```swift
+try Player.fetchOne(db, id: 1)                                // Player?
+try Player.fetchOne(db, key: ["email": "arthur@example.com"]) // Player?
+try Country.fetchAll(db, keys: ["FR", "US"])                  // [Country]
+```
+
+² See [Fetch Requests](#requests):
+
+```swift
+let request = Player.filter(emailColumn != nil).order(nameColumn)
+let players = try request.fetchAll(db) // [Player]
+let count = try request.fetchCount(db) // Int
+```
+
+³ See [SQL queries](#fetch-queries):
+
+```swift
+let player = try Player.fetchOne(db, sql: "SELECT * FROM player WHERE id = ?", arguments: [1]) // Player?
+```
+
+⁴ See [`Statement`]:
+
+```swift
+let statement = try db.makeStatement(sql: "SELECT * FROM player WHERE id = ?")
+let player = try Player.fetchOne(statement, arguments: [1]) // Player?
+```
+
+
+The Query Interface
+===================
+
+**The query interface lets you write pure Swift instead of SQL:**
+
+```swift
+try dbQueue.write { db in
+    // Update database schema
+    try db.create(table: "wine") { t in ... }
+
+    // Fetch records
+    let wines = try Wine
+        .filter(originColumn == "Burgundy")
+        .order(priceColumn)
+        .fetchAll(db)
+
+    // Count
+    let count = try Wine
+        .filter(colorColumn == Color.red)
+        .fetchCount(db)
+
+    // Update
+    try Wine
+        .filter(originColumn == "Burgundy")
+        .updateAll(db, priceColumn *= 0.75)
+
+    // Delete
+    try Wine
+        .filter(corkedColumn == true)
+        .deleteAll(db)
+}
+```
+
+You need to open a [database connection] before you can query the database.
+
+Please bear in mind that the query interface can not generate all possible SQL queries.
+You may also *prefer* writing SQL, and this is just OK. From little snippets to full queries, your SQL skills are welcome:
+
+```swift
+try dbQueue.write { db in
+    // Update database schema (with SQL)
+    try db.execute(sql: "CREATE TABLE wine (...)")
+
+    // Fetch records (with SQL)
+    let wines = try Wine.fetchAll(db,
+        sql: "SELECT * FROM wine WHERE origin = ? ORDER BY price",
+        arguments: ["Burgundy"])
+
+    // Count (with an SQL snippet)
+    let count = try Wine
+        .filter(sql: "color = ?", arguments: [Color.red])
+        .fetchCount(db)
+
+    // Update (with SQL)
+    try db.execute(sql: "UPDATE wine SET price = price * 0.75 WHERE origin = 'Burgundy'")
+
+    // Delete (with SQL)
+    try db.execute(sql: "DELETE FROM wine WHERE corked")
+}
+```
+
+So don't miss the [SQL API](#sqlite-api).
+
+> **Note**: the generated SQL may change between GRDB releases, without notice: don't rely on any specific SQL output in your application.
+
+- [The Database Schema](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseschema)
+- [Requests](#requests)
+- [Expressions](#expressions)
+    - [SQL Operators](#sql-operators)
+    - [SQL Functions](#sql-functions)
+- [Embedding SQL in Query Interface Requests]
+- [Fetching from Requests]
+- [Fetching by Key](#fetching-by-key)
+- [Testing for Record Existence](#testing-for-record-existence)
+- [Fetching Aggregated Values](#fetching-aggregated-values)
+- [Delete Requests](#delete-requests)
+- [Update Requests](#update-requests)
+- [Custom Requests](#custom-requests)
+- :blue_book: [Associations and Joins](Documentation/AssociationsBasics.md)
+- :blue_book: [Common Table Expressions]
+- :blue_book: [Query Interface Organization]
+
+## Requests
+
+📖 [`QueryInterfaceRequest`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest), [`Table`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/table)
+
+**The query interface requests** let you fetch values from the database:
+
+```swift
+let request = Player.filter(emailColumn != nil).order(nameColumn)
+let players = try request.fetchAll(db) // [Player]
+let count = try request.fetchCount(db) // Int
+```
+
+Query interface requests usually start from **a type** that adopts the `TableRecord` protocol, such as a `Record` subclass (see [Records](#records)):
+
+```swift
+class Player: Record { ... }
+
+// The request for all players:
+let request = Player.all()
+let players = try request.fetchAll(db) // [Player]
+```
+
+When you can not use a record type, use `Table`:
+
+```swift
+// The request for all rows from the player table:
+let table = Table("player")
+let request = table.all()
+let rows = try request.fetchAll(db)    // [Row]
+
+// The request for all players from the player table:
+let table = Table<Player>("player")
+let request = table.all()
+let players = try request.fetchAll(db) // [Player]
+```
+
+> **Note**: all examples in the documentation below use a record type, but you can always substitute a `Table` instead.
+
+Next, declare the table **columns** that you want to use for filtering, or sorting:
+
+```swift
+let idColumn = Column("id")
+let nameColumn = Column("name")
+```
+
+You can also declare column enums, if you prefer:
+
+```swift
+// Columns.id and Columns.name can be used just as
+// idColumn and nameColumn declared above.
+enum Columns: String, ColumnExpression {
+    case id
+    case name
+}
+```
+
+You can now build requests with the following methods: `all`, `none`, `select`, `distinct`, `filter`, `matching`, `group`, `having`, `order`, `reversed`, `limit`, `joining`, `including`, `with`. All those methods return another request, which you can further refine by applying another method: `Player.select(...).filter(...).order(...)`.
+
+- [`all()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord/all()), [`none()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerecord/none()): the requests for all rows, or no row.
+
+    ```swift
+    // SELECT * FROM player
+    Player.all()
+    ```
+
+    By default, all columns are selected. See [Columns Selected by a Request].
+
+- [`select(...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/selectionrequest/select(_:)-30yzl) and [`select(..., as:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/select(_:as:)-282xc) define the selected columns. See [Columns Selected by a Request].
+
+    ```swift
+    // SELECT name FROM player
+    Player.select(nameColumn, as: String.self)
+    ```
+
+- [`annotated(with: expression...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/selectionrequest/annotated(with:)-6ehs4) extends the selection.
+
+    ```swift
+    // SELECT *, (score + bonus) AS total FROM player
+    Player.annotated(with: (scoreColumn + bonusColumn).forKey("total"))
+    ```
+
+- [`annotated(with: aggregate)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/annotated(with:)-74xfs) extends the selection with [association aggregates](Documentation/AssociationsBasics.md#association-aggregates).
+
+    ```swift
+    // SELECT team.*, COUNT(DISTINCT player.id) AS playerCount
+    // FROM team
+    // LEFT JOIN player ON player.teamId = team.id
+    // GROUP BY team.id
+    Team.annotated(with: Team.players.count)
+    ```
+
+- [`annotated(withRequired: association)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/annotated(withrequired:)) and [`annotated(withOptional: association)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/annotated(withoptional:)) extend the selection with [Associations].
+
+    ```swift
+    // SELECT player.*, team.color
+    // FROM player
+    // JOIN team ON team.id = player.teamId
+    Player.annotated(withRequired: Player.team.select(colorColumn))
+    ```
+
+- [`distinct()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/distinct()) performs uniquing.
+
+    ```swift
+    // SELECT DISTINCT name FROM player
+    Player.select(nameColumn, as: String.self).distinct()
+    ```
+
+- [`filter(expression)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/filteredrequest/filter(_:)) applies conditions.
+
+    ```swift
+    // SELECT * FROM player WHERE id IN (1, 2, 3)
+    Player.filter([1,2,3].contains(idColumn))
+
+    // SELECT * FROM player WHERE (name IS NOT NULL) AND (height > 1.75)
+    Player.filter(nameColumn != nil && heightColumn > 1.75)
+    ```
+
+- [`filter(id:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(id:)) and [`filter(ids:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(ids:)) are type-safe methods available on [Identifiable Records]:
+
+    ```swift
+    // SELECT * FROM player WHERE id = 1
+    Player.filter(id: 1)
+
+    // SELECT * FROM country WHERE isoCode IN ('FR', 'US')
+    Country.filter(ids: ["FR", "US"])
+    ```
+
+- [`filter(key:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(key:)-1p9sq) and [`filter(keys:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/filter(keys:)-6ggt1) apply conditions on primary and unique keys:
+
+    ```swift
+    // SELECT * FROM player WHERE id = 1
+    Player.filter(key: 1)
+
+    // SELECT * FROM country WHERE isoCode IN ('FR', 'US')
+    Country.filter(keys: ["FR", "US"])
+
+    // SELECT * FROM citizenship WHERE citizenId = 1 AND countryCode = 'FR'
+    Citizenship.filter(key: ["citizenId": 1, "countryCode": "FR"])
+
+    // SELECT * FROM player WHERE email = 'arthur@example.com'
+    Player.filter(key: ["email": "arthur@example.com"])
+    ```
+
+- `matching(pattern)` ([FTS3](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/matching(_:)-3s3zr), [FTS5](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/matching(_:)-7c1e8)) performs [full-text search](Documentation/FullTextSearch.md).
+
+    ```swift
+    // SELECT * FROM document WHERE document MATCH 'sqlite database'
+    let pattern = FTS3Pattern(matchingAllTokensIn: "SQLite database")
+    Document.matching(pattern)
+    ```
+
+    When the pattern is nil, no row will match.
+
+- [`group(expression, ...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/aggregatingrequest/group(_:)-edak) groups rows.
+
+    ```swift
+    // SELECT name, MAX(score) FROM player GROUP BY name
+    Player
+        .select(nameColumn, max(scoreColumn))
+        .group(nameColumn)
+    ```
+
+- [`having(expression)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/aggregatingrequest/having(_:)) applies conditions on grouped rows.
+
+    ```swift
+    // SELECT team, MAX(score) FROM player GROUP BY team HAVING MIN(score) >= 1000
+    Player
+        .select(teamColumn, max(scoreColumn))
+        .group(teamColumn)
+        .having(min(scoreColumn) >= 1000)
+    ```
+
+- [`having(aggregate)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/having(_:)) applies conditions on grouped rows, according to an [association aggregate](Documentation/AssociationsBasics.md#association-aggregates).
+
+    ```swift
+    // SELECT team.*
+    // FROM team
+    // LEFT JOIN player ON player.teamId = team.id
+    // GROUP BY team.id
+    // HAVING COUNT(DISTINCT player.id) >= 5
+    Team.having(Team.players.count >= 5)
+    ```
+
+- [`order(ordering, ...)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/orderedrequest/order(_:)-63rzl) sorts.
+
+    ```swift
+    // SELECT * FROM player ORDER BY name
+    Player.order(nameColumn)
+
+    // SELECT * FROM player ORDER BY score DESC, name
+    Player.order(scoreColumn.desc, nameColumn)
+    ```
+
+    SQLite considers NULL values to be smaller than any other values for sorting purposes.
+    Hence, NULLs naturally appear at the beginning of an ascending ordering and at the end of a descending ordering. With a [custom SQLite build], this can be changed using `.ascNullsLast` and `.descNullsFirst`:
+
+    ```swift
+    // SELECT * FROM player ORDER BY score ASC NULLS LAST
+    Player.order(nameColumn.ascNullsLast)
+    ```
+
+    Each `order` call clears any previous ordering:
+
+    ```swift
+    // SELECT * FROM player ORDER BY name
+    Player.order(scoreColumn).order(nameColumn)
+    ```
+
+- [`reversed()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/orderedrequest/reversed()) reverses the eventual orderings.
+
+    ```swift
+    // SELECT * FROM player ORDER BY score ASC, name DESC
+    Player.order(scoreColumn.desc, nameColumn).reversed()
+    ```
+
+    If no ordering was already specified, this method has no effect:
+
+    ```swift
+    // SELECT * FROM player
+    Player.all().reversed()
+    ```
+
+- [`limit(limit, offset: offset)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/limit(_:offset:)) limits and pages results.
+
+    ```swift
+    // SELECT * FROM player LIMIT 5
+    Player.limit(5)
+
+    // SELECT * FROM player LIMIT 5 OFFSET 10
+    Player.limit(5, offset: 10)
+    ```
+
+- [`joining(required:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/joining(required:)), [`joining(optional:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/joining(optional:)), [`including(required:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(required:)), [`including(optional:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(optional:)), and [`including(all:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/joinablerequest/including(all:)) fetch and join records through [Associations].
+
+    ```swift
+    // SELECT player.*, team.*
+    // FROM player
+    // JOIN team ON team.id = player.teamId
+    Player.including(required: Player.team)
+    ```
+
+- [`with(cte)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/derivablerequest/with(_:)) embeds a [common table expression]:
+
+    ```swift
+    // WITH ... SELECT * FROM player
+    let cte = CommonTableExpression(...)
+    Player.with(cte)
+    ```
+
+- Other requests that involve the primary key:
+
+    - [`selectPrimaryKey(as:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/selectprimarykey(as:)) selects the primary key.
+
+        ```swift
+        // SELECT id FROM player
+        Player.selectPrimaryKey(as: Int64.self)    // QueryInterfaceRequest<Int64>
+
+        // SELECT code FROM country
+        Country.selectPrimaryKey(as: String.self)  // QueryInterfaceRequest<String>
+
+        // SELECT citizenId, countryCode FROM citizenship
+        Citizenship.selectPrimaryKey(as: Row.self) // QueryInterfaceRequest<Row>
+        ```
+
+    - [`orderByPrimaryKey()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/orderbyprimarykey()) sorts by primary key.
+
+        ```swift
+        // SELECT * FROM player ORDER BY id
+        Player.orderByPrimaryKey()
+
+        // SELECT * FROM country ORDER BY code
+        Country.orderByPrimaryKey()
+
+        // SELECT * FROM citizenship ORDER BY citizenId, countryCode
+        Citizenship.orderByPrimaryKey()
+        ```
+
+    - [`groupByPrimaryKey()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/tablerequest/groupbyprimarykey()) groups rows by primary key.
+
+
+You can refine requests by chaining those methods:
+
+```swift
+// SELECT * FROM player WHERE (email IS NOT NULL) ORDER BY name
+Player.order(nameColumn).filter(emailColumn != nil)
+```
+
+The `select`, `order`, `group`, and `limit` methods ignore and replace previously applied selection, orderings, grouping, and limits. By contrast, the `filter`, `matching`, and `having` methods extend the query:
+
+```swift
+Player                          // SELECT * FROM player
+    .filter(nameColumn != nil)  // WHERE (name IS NOT NULL)
+    .filter(emailColumn != nil) // AND (email IS NOT NULL)
+    .order(nameColumn)          // - ignored -
+    .reversed()                 // - ignored -
+    .order(scoreColumn)         // ORDER BY score
+    .limit(20, offset: 40)      // - ignored -
+    .limit(10)                  // LIMIT 10
+```
+
+
+Raw SQL snippets are also accepted, with optional [arguments](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments):
+
+```swift
+// SELECT DATE(creationDate), COUNT(*) FROM player WHERE name = 'Arthur' GROUP BY date(creationDate)
+Player
+    .select(sql: "DATE(creationDate), COUNT(*)")
+    .filter(sql: "name = ?", arguments: ["Arthur"])
+    .group(sql: "DATE(creationDate)")
+```
+
+
+### Columns Selected by a Request
+
+By default, query interface requests select all columns:
+
+```swift
+// SELECT * FROM player
+struct Player: TableRecord { ... }
+let request = Player.all()
+
+// SELECT * FROM player
+let table = Table("player")
+let request = table.all()
+```
+
+**The selection can be changed for each individual request, or, in the case of record-based requests, for all requests built from the record type.**
+
+The `select(...)` and `select(..., as:)` methods change the selection of a single request (see [Fetching from Requests] for detailed information):
+
+```swift
+let request = Player.select(max(Column("score")))
+let maxScore = try Int.fetchOne(db, request) // Int?
+
+let request = Player.select(max(Column("score")), as: Int.self)
+let maxScore = try request.fetchOne(db)      // Int?
+```
+
+The default selection for a record type is controlled by the `databaseSelection` property:
+
+```swift
+struct RestrictedPlayer : TableRecord {
+    static let databaseTableName = "player"
+    static let databaseSelection: [any SQLSelectable] = [Column("id"), Column("name")]
+}
+
+struct ExtendedPlayer : TableRecord {
+    static let databaseTableName = "player"
+    static let databaseSelection: [any SQLSelectable] = [AllColumns(), Column.rowID]
+}
+
+// SELECT id, name FROM player
+let request = RestrictedPlayer.all()
+
+// SELECT *, rowid FROM player
+let request = ExtendedPlayer.all()
+```
+
+> **Note**: make sure the `databaseSelection` property is explicitly declared as `[any SQLSelectable]`. If it is not, the Swift compiler may silently miss the protocol requirement, resulting in sticky `SELECT *` requests. To verify your setup, see the [How do I print a request as SQL?](#how-do-i-print-a-request-as-sql) FAQ.
+
+
+## Expressions
+
+Feed [requests](#requests) with SQL expressions built from your Swift code:
+
+
+### SQL Operators
+
+📖 [`SQLSpecificExpressible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/sqlspecificexpressible)
+
+GRDB comes with a Swift version of many SQLite [built-in operators](https://sqlite.org/lang_expr.html#operators), listed below. But not all: see [Embedding SQL in Query Interface Requests] for a way to add support for missing SQL operators.
+
+- `=`, `<>`, `<`, `<=`, `>`, `>=`, `IS`, `IS NOT`
+
+    Comparison operators are based on the Swift operators `==`, `!=`, `===`, `!==`, `<`, `<=`, `>`, `>=`:
+
+    ```swift
+    // SELECT * FROM player WHERE (name = 'Arthur')
+    Player.filter(nameColumn == "Arthur")
+
+    // SELECT * FROM player WHERE (name IS NULL)
+    Player.filter(nameColumn == nil)
+
+    // SELECT * FROM player WHERE (score IS 1000)
+    Player.filter(scoreColumn === 1000)
+
+    // SELECT * FROM rectangle WHERE width < height
+    Rectangle.filter(widthColumn < heightColumn)
+    ```
+
+    Subqueries are supported:
+
+    ```swift
+    // SELECT * FROM player WHERE score = (SELECT max(score) FROM player)
+    let maximumScore = Player.select(max(scoreColumn))
+    Player.filter(scoreColumn == maximumScore)
+
+    // SELECT * FROM player WHERE score = (SELECT max(score) FROM player)
+    let maximumScore = SQLRequest<Int>("SELECT max(score) FROM player")
+    Player.filter(scoreColumn == maximumScore)
+    ```
+
+    > **Note**: SQLite string comparison, by default, is case-sensitive and not Unicode-aware. See [string comparison](#string-comparison) if you need more control.
+
+- `*`, `/`, `+`, `-`
+
+    SQLite arithmetic operators are derived from their Swift equivalent:
+
+    ```swift
+    // SELECT ((temperature * 1.8) + 32) AS fahrenheit FROM planet
+    Planet.select((temperatureColumn * 1.8 + 32).forKey("fahrenheit"))
+    ```
+
+    > **Note**: an expression like `nameColumn + "rrr"` will be interpreted by SQLite as a numerical addition (with funny results), not as a string concatenation. See the `concat` operator below.
+
+    When you want to join a sequence of expressions with the `+` or `*` operator, use `joined(operator:)`:
+
+    ```swift
+    // SELECT score + bonus + 1000 FROM player
+    let values = [
+        scoreColumn,
+        bonusColumn,
+        1000.databaseValue]
+    Player.select(values.joined(operator: .add))
+    ```
+
+    Note in the example above how you concatenate raw values: `1000.databaseValue`. A plain `1000` would not compile.
+
+    When the sequence is empty, `joined(operator: .add)` returns 0, and `joined(operator: .multiply)` returns 1.
+
+- `&`, `|`, `~`, `<<`, `>>`
+
+    Bitwise operations (bitwise and, or, not, left shift, right shift) are derived from their Swift equivalent:
+
+    ```swift
+    // SELECT mask & 2 AS isRocky FROM planet
+    Planet.select((Column("mask") & 2).forKey("isRocky"))
+    ```
+
+- `||`
+
+    Concatenate several strings:
+
+    ```swift
+    // SELECT firstName || ' ' || lastName FROM player
+    Player.select([firstNameColumn, " ".databaseValue, lastNameColumn].joined(operator: .concat))
+    ```
+
+    Note in the example above how you concatenate raw strings: `" ".databaseValue`. A plain `" "` would not compile.
+
+    When the sequence is empty, `joined(operator: .concat)` returns the empty string.
+
+- `AND`, `OR`, `NOT`
+
+    The SQL logical operators are derived from the Swift `&&`, `||` and `!`:
+
+    ```swift
+    // SELECT * FROM player WHERE ((NOT verified) OR (score < 1000))
+    Player.filter(!verifiedColumn || scoreColumn < 1000)
+    ```
+
+    When you want to join a sequence of expressions with the `AND` or `OR` operator, use `joined(operator:)`:
+
+    ```swift
+    // SELECT * FROM player WHERE (verified AND (score >= 1000) AND (name IS NOT NULL))
+    let conditions = [
+        verifiedColumn,
+        scoreColumn >= 1000,
+        nameColumn != nil]
+    Player.filter(conditions.joined(operator: .and))
+    ```
+
+    When the sequence is empty, `joined(operator: .and)` returns true, and `joined(operator: .or)` returns false:
+
+    ```swift
+    // SELECT * FROM player WHERE 1
+    Player.filter([].joined(operator: .and))
+
+    // SELECT * FROM player WHERE 0
+    Player.filter([].joined(operator: .or))
+    ```
+
+- `BETWEEN`, `IN`, `NOT IN`
+
+    To check inclusion in a Swift sequence (array, set, range…), call the `contains` method:
+
+    ```swift
+    // SELECT * FROM player WHERE id IN (1, 2, 3)
+    Player.filter([1, 2, 3].contains(idColumn))
+
+    // SELECT * FROM player WHERE id NOT IN (1, 2, 3)
+    Player.filter(![1, 2, 3].contains(idColumn))
+
+    // SELECT * FROM player WHERE score BETWEEN 0 AND 1000
+    Player.filter((0...1000).contains(scoreColumn))
+
+    // SELECT * FROM player WHERE (score >= 0) AND (score < 1000)
+    Player.filter((0..<1000).contains(scoreColumn))
+
+    // SELECT * FROM player WHERE initial BETWEEN 'A' AND 'N'
+    Player.filter(("A"..."N").contains(initialColumn))
+
+    // SELECT * FROM player WHERE (initial >= 'A') AND (initial < 'N')
+    Player.filter(("A"..<"N").contains(initialColumn))
+    ```
+
+    To check inclusion inside a subquery, call the `contains` method as well:
+
+    ```swift
+    // SELECT * FROM player WHERE id IN (SELECT playerId FROM playerSelection)
+    let selectedPlayerIds = PlayerSelection.select(playerIdColumn)
+    Player.filter(selectedPlayerIds.contains(idColumn))
+
+    // SELECT * FROM player WHERE id IN (SELECT playerId FROM playerSelection)
+    let selectedPlayerIds = SQLRequest<Int64>("SELECT playerId FROM playerSelection")
+    Player.filter(selectedPlayerIds.contains(idColumn))
+    ```
+
+    To check inclusion inside a [common table expression], call the `contains` method as well:
+
+    ```swift
+    // WITH selectedName AS (...)
+    // SELECT * FROM player WHERE name IN selectedName
+    let cte = CommonTableExpression(named: "selectedName", ...)
+    Player
+        .with(cte)
+        .filter(cte.contains(nameColumn))
+    ```
+
+    > **Note**: SQLite string comparison, by default, is case-sensitive and not Unicode-aware. See [string comparison](#string-comparison) if you need more control.
+
+- `EXISTS`, `NOT EXISTS`
+
+    To check if a subquery would return rows, call the `exists` method:
+
+    ```swift
+    // Teams that have at least one player
+    //
+    // SELECT * FROM team
+    // WHERE EXISTS (SELECT * FROM player WHERE teamID = team.id)
+    let teamAlias = TableAlias()
+    let player = Player.filter(Column("teamID") == teamAlias[Column("id")])
+    let teams = Team.aliased(teamAlias).filter(player.exists())
+
+    // Teams that have no player
+    //
+    // SELECT * FROM team
+    // WHERE NOT EXISTS (SELECT * FROM player WHERE teamID = team.id)
+    let teams = Team.aliased(teamAlias).filter(!player.exists())
+    ```
+
+    In the above example, you use a `TableAlias` in order to let a subquery refer to a column from another table.
+
+    In the next example, which involves the same table twice, the table alias requires an explicit disambiguation with `TableAlias(name:)`:
+
+    ```swift
+    // Players who coach at least one other player
+    //
+    // SELECT coach.* FROM player coach
+    // WHERE EXISTS (SELECT * FROM player WHERE coachId = coach.id)
+    let coachAlias = TableAlias(name: "coach")
+    let coachedPlayer = Player.filter(Column("coachId") == coachAlias[Column("id")])
+    let coaches = Player.aliased(coachAlias).filter(coachedPlayer.exists())
+    ```
+
+    Finally, subqueries can also be expressed as SQL, with [SQL Interpolation]:
+
+    ```swift
+    // SELECT coach.* FROM player coach
+    // WHERE EXISTS (SELECT * FROM player WHERE coachId = coach.id)
+    let coachedPlayer = SQLRequest<Player>("SELECT * FROM player WHERE coachId = \(coachAlias[Column("id")])")
+    let coaches = Player.aliased(coachAlias).filter(coachedPlayer.exists())
+    ```
+
+- `LIKE`
+
+    The SQLite LIKE operator is available as the `like` method:
+
+    ```swift
+    // SELECT * FROM player WHERE (email LIKE '%@example.com')
+    Player.filter(emailColumn.like("%@example.com"))
+
+    // SELECT * FROM book WHERE (title LIKE '%10\%%' ESCAPE '\')
+    Book.filter(titleColumn.like("%10\\%%", escape: "\\"))
+    ```
+
+    > **Note**: the SQLite LIKE operator is case-insensitive but not Unicode-aware. For example, the expression `'a' LIKE 'A'` is true but `'æ' LIKE 'Æ'` is false.
+
+- `MATCH`
+
+    The full-text MATCH operator is available through [FTS3Pattern](Documentation/FullTextSearch.md#fts3pattern) (for FTS3 and FTS4 tables) and [FTS5Pattern](Documentation/FullTextSearch.md#fts5pattern) (for FTS5):
+
+    FTS3 and FTS4:
+
+    ```swift
+    let pattern = FTS3Pattern(matchingAllTokensIn: "SQLite database")
+
+    // SELECT * FROM document WHERE document MATCH 'sqlite database'
+    Document.matching(pattern)
+
+    // SELECT * FROM document WHERE content MATCH 'sqlite database'
+    Document.filter(contentColumn.match(pattern))
+    ```
+
+    FTS5:
+
+    ```swift
+    let pattern = FTS5Pattern(matchingAllTokensIn: "SQLite database")
+
+    // SELECT * FROM document WHERE document MATCH 'sqlite database'
+    Document.matching(pattern)
+    ```
+
+- `AS`
+
+    To give an alias to an expression, use the `forKey` method:
+
+    ```swift
+    // SELECT (score + bonus) AS total
+    // FROM player
+    Player.select((Column("score") + Column("bonus")).forKey("total"))
+    ```
+
+    If you need to refer to this aliased column in another place of the request, use a detached column:
+
+    ```swift
+    // SELECT (score + bonus) AS total
+    // FROM player
+    // ORDER BY total
+    Player
+        .select((Column("score") + Column("bonus")).forKey("total"))
+        .order(Column("total").detached)
+    ```
+
+    Unlike `Column("total")`, the detached column `Column("total").detached` is never associated to the "player" table, so it is always rendered as `total` in the generated SQL, even when the request involves other tables via an [association](Documentation/AssociationsBasics.md) or a [common table expression].
+
+
+### SQL Functions
+
+📖 [`SQLSpecificExpressible`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/sqlspecificexpressible)
+
+GRDB comes with a Swift version of many SQLite [built-in functions](https://sqlite.org/lang_corefunc.html), listed below. But not all: see [Embedding SQL in Query Interface Requests] for a way to add support for missing SQL functions.
+
+- `ABS`, `AVG`, `COUNT`, `DATETIME`, `JULIANDAY`, `LENGTH`, `MAX`, `MIN`, `SUM`, `TOTAL`:
+
+    Those are based on the `abs`, `average`, `count`, `dateTime`, `julianDay`, `length`, `max`, `min`, `sum` and `total` Swift functions:
+
+    ```swift
+    // SELECT MIN(score), MAX(score) FROM player
+    Player.select(min(scoreColumn), max(scoreColumn))
+
+    // SELECT COUNT(name) FROM player
+    Player.select(count(nameColumn))
+
+    // SELECT COUNT(DISTINCT name) FROM player
+    Player.select(count(distinct: nameColumn))
+
+    // SELECT JULIANDAY(date, 'start of year') FROM game
+    Game.select(julianDay(dateColumn, .startOfYear))
+    ```
+
+    For more information about the functions `dateTime` and `julianDay`, see [Date And Time Functions](https://www.sqlite.org/lang_datefunc.html).
+
+- `CAST`
+
+    Use the `cast` Swift function:
+
+    ```swift
+    // SELECT (CAST(wins AS REAL) / games) AS successRate FROM player
+    Player.select((cast(winsColumn, as: .real) / gamesColumn).forKey("successRate"))
+    ```
+
+    See [CAST expressions](https://www.sqlite.org/lang_expr.html#castexpr) for more information about SQLite conversions.
+
+- `IFNULL`
+
+    Use the Swift `??` operator:
+
+    ```swift
+    // SELECT IFNULL(name, 'Anonymous') FROM player
+    Player.select(nameColumn ?? "Anonymous")
+
+    // SELECT IFNULL(name, email) FROM player
+    Player.select(nameColumn ?? emailColumn)
+    ```
+
+- `LOWER`, `UPPER`
+
+    The query interface does not give access to those SQLite functions. Nothing against them, but they are not Unicode-aware.
+
+    Instead, GRDB extends SQLite with SQL functions that call the Swift built-in string functions `capitalized`, `lowercased`, `uppercased`, `localizedCapitalized`, `localizedLowercased` and `localizedUppercased`:
+
+    ```swift
+    Player.select(nameColumn.uppercased())
+    ```
+
+    > **Note**: When *comparing* strings, you'd rather use a [collation](#string-comparison):
+    >
+    > ```swift
+    > let name: String = ...
+    >
+    > // Not recommended
+    > nameColumn.uppercased() == name.uppercased()
+    >
+    > // Better
+    > nameColumn.collating(.caseInsensitiveCompare) == name
+    > ```
+
+- Custom SQL functions and aggregates
+
+    You can apply your own [custom SQL functions and aggregates](#custom-functions-):
+
+    ```swift
+    let f = DatabaseFunction("f", ...)
+
+    // SELECT f(name) FROM player
+    Player.select(f.apply(nameColumn))
+    ```
+
+## Embedding SQL in Query Interface Requests
+
+You will sometimes want to extend your query interface requests with SQL snippets. This can happen because GRDB does not provide a Swift interface for some SQL function or operator, or because you want to use an SQLite construct that GRDB does not support.
+
+Support for extensibility is broad, but not unlimited. All the SQL queries built by the query interface have the shape below. _If you need something else, you'll have to use [raw SQL requests](#sqlite-api)._
+
+```sql
+WITH ...     -- 1
+SELECT ...   -- 2
+FROM ...     -- 3
+JOIN ...     -- 4
+WHERE ...    -- 5
+GROUP BY ... -- 6
+HAVING ...   -- 7
+ORDER BY ... -- 8
+LIMIT ...    -- 9
+```
+
+1. `WITH ...`: see [Common Table Expressions].
+
+2. `SELECT ...`
+
+    The selection can be provided as raw SQL:
+
+    ```swift
+    // SELECT IFNULL(name, 'O''Brien'), score FROM player
+    let request = Player.select(sql: "IFNULL(name, 'O''Brien'), score")
+
+    // SELECT IFNULL(name, 'O''Brien'), score FROM player
+    let defaultName = "O'Brien"
+    let request = Player.select(sql: "IFNULL(name, ?), score", arguments: [defaultName])
+    ```
+
+    The selection can be provided with [SQL Interpolation]:
+
+    ```swift
+    // SELECT IFNULL(name, 'O''Brien'), score FROM player
+    let defaultName = "O'Brien"
+    let request = Player.select(literal: "IFNULL(name, \(defaultName)), score")
+    ```
+
+    The selection can be provided with a mix of Swift and [SQL Interpolation]:
+
+    ```swift
+    // SELECT IFNULL(name, 'O''Brien') AS displayName, score FROM player
+    let defaultName = "O'Brien"
+    let displayName: SQL = "IFNULL(\(Column("name")), \(defaultName)) AS displayName"
+    let request = Player.select(displayName, Column("score"))
+    ```
+
+    When the custom SQL snippet should behave as a full-fledged expression, with support for the `+` Swift operator, the `forKey` aliasing method, and all other [SQL Operators](#sql-operators), build an _expression literal_ with the `SQL.sqlExpression` method:
+
+    ```swift
+    // SELECT IFNULL(name, 'O''Brien') AS displayName, score FROM player
+    let defaultName = "O'Brien"
+    let displayName = SQL("IFNULL(\(Column("name")), \(defaultName))").sqlExpression
+    let request = Player.select(displayName.forKey("displayName"), Column("score"))
+    ```
+
+    Such expression literals allow you to build a reusable support library of SQL functions or operators that are missing from the query interface. For example, you can define a Swift `date` function:
+
+    ```swift
+    func date(_ value: some SQLSpecificExpressible) -> SQLExpression {
+        SQL("DATE(\(value))").sqlExpression
+    }
+
+    // SELECT * FROM "player" WHERE DATE("createdAt") = '2020-01-23'
+    let request = Player.filter(date(Column("createdAt")) == "2020-01-23")
+    ```
+
+    See the [Query Interface Organization] for more information about `SQLSpecificExpressible` and `SQLExpression`.
+
+3. `FROM ...`: only one table is supported here. You can not customize this SQL part.
+
+4. `JOIN ...`: joins are fully controlled by [Associations]. You can not customize this SQL part.
+
+5. `WHERE ...`
+
+    The WHERE clause can be provided as raw SQL:
+
+    ```swift
+    // SELECT * FROM player WHERE score >= 1000
+    let request = Player.filter(sql: "score >= 1000")
+
+    // SELECT * FROM player WHERE score >= 1000
+    let minScore = 1000
+    let request = Player.filter(sql: "score >= ?", arguments: [minScore])
+    ```
+
+    The WHERE clause can be provided with [SQL Interpolation]:
+
+    ```swift
+    // SELECT * FROM player WHERE score >= 1000
+    let minScore = 1000
+    let request = Player.filter(literal: "score >= \(minScore)")
+    ```
+
+    The WHERE clause can be provided with a mix of Swift and [SQL Interpolation]:
+
+    ```swift
+    // SELECT * FROM player WHERE (score >= 1000) AND (team = 'red')
+    let minScore = 1000
+    let scoreCondition: SQL = "\(Column("score")) >= \(minScore)"
+    let request = Player.filter(scoreCondition && Column("team") == "red")
+    ```
+
+    See `SELECT ...` above for more SQL Interpolation examples.
+
+6. `GROUP BY ...`
+
+    The GROUP BY clause can be provided as raw SQL, SQL Interpolation, or a mix of Swift and SQL Interpolation, just as the selection and the WHERE clause (see above).
+
+7. `HAVING ...`
+
+    The HAVING clause can be provided as raw SQL, SQL Interpolation, or a mix of Swift and SQL Interpolation, just as the selection and the WHERE clause (see above).
+
+8. `ORDER BY ...`
+
+    The ORDER BY clause can be provided as raw SQL, SQL Interpolation, or a mix of Swift and SQL Interpolation, just as the selection and the WHERE clause (see above).
+
+    In order to support the `desc` and `asc` query interface operators, and the `reversed()` query interface method, you must provide your orderings as _expression literals_ with the `SQL.sqlExpression` method:
+
+    ```swift
+    // SELECT * FROM "player"
+    // ORDER BY (score + bonus) ASC, name DESC
+    let total = SQL("(score + bonus)").sqlExpression
+    let request = Player
+        .order(total.desc, Column("name"))
+        .reversed()
+    ```
+
+9. `LIMIT ...`: use the `limit(_:offset:)` method. You can not customize this SQL part.
+
+
+## Fetching from Requests
+
+Once you have a request, you can fetch the records at the origin of the request:
+
+```swift
+// Some request based on `Player`
+let request = Player.filter(...)... // QueryInterfaceRequest<Player>
+
+// Fetch players:
+try request.fetchCursor(db) // A Cursor of Player
+try request.fetchAll(db)    // [Player]
+try request.fetchSet(db)    // Set<Player>
+try request.fetchOne(db)    // Player?
+```
+
+For example:
+
+```swift
+let allPlayers = try Player.fetchAll(db)                            // [Player]
+let arthur = try Player.filter(nameColumn == "Arthur").fetchOne(db) // Player?
+```
+
+See [fetching methods](#fetching-methods) for information about the `fetchCursor`, `fetchAll`, `fetchSet` and `fetchOne` methods.
+
+**You sometimes want to fetch other values**.
+
+The simplest way is to use the request as an argument to a fetching method of the desired type:
+
+```swift
+// Fetch an Int
+let request = Player.select(max(scoreColumn))
+let maxScore = try Int.fetchOne(db, request) // Int?
+
+// Fetch a Row
+let request = Player.select(min(scoreColumn), max(scoreColumn))
+let row = try Row.fetchOne(db, request)!     // Row
+let minScore = row[0] as Int?
+let maxScore = row[1] as Int?
+```
+
+You can also change the request so that it knows the type it has to fetch:
+
+- With `asRequest(of:)`, useful when you use [Associations]:
+
+    ```swift
+    struct BookInfo: FetchableRecord, Decodable {
+        var book: Book
+        var author: Author
+    }
+
+    // A request of BookInfo
+    let request = Book
+        .including(required: Book.author)
+        .asRequest(of: BookInfo.self)
+
+    let bookInfos = try dbQueue.read { db in
+        try request.fetchAll(db) // [BookInfo]
+    }
+    ```
+
+- With `select(..., as:)`, which is handy when you change the selection:
+
+    ```swift
+    // A request of Int
+    let request = Player.select(max(scoreColumn), as: Int.self)
+
+    let maxScore = try dbQueue.read { db in
+        try request.fetchOne(db) // Int?
+    }
+    ```
+
+
+## Fetching by Key
+
+**Fetching records according to their primary key** is a common task.
+
+[Identifiable Records] can use the type-safe methods `find(_:id:)`, `fetchOne(_:id:)`, `fetchAll(_:ids:)` and `fetchSet(_:ids:)`:
+
+```swift
+try Player.find(db, id: 1)                  // Player
+try Player.fetchOne(db, id: 1)              // Player?
+try Country.fetchAll(db, ids: ["FR", "US"]) // [Country]
+```
+
+All record types can use `find(_:key:)`, `fetchOne(_:key:)`, `fetchAll(_:keys:)` and `fetchSet(_:keys:)` that apply conditions on primary and unique keys:
+
+```swift
+try Player.find(db, key: 1)                 // Player
+try Player.fetchOne(db, key: 1)             // Player?
+try Country.fetchAll(db, keys: ["FR", "US"])                             // [Country]
+try Player.fetchOne(db, key: ["email": "arthur@example.com"])            // Player?
+try Citizenship.fetchOne(db, key: ["citizenId": 1, "countryCode": "FR"]) // Citizenship?
+```
+
+When the table has no explicit primary key, GRDB uses the [hidden `rowid` column](https://www.sqlite.org/rowidtable.html):
+
+```swift
+// SELECT * FROM document WHERE rowid = 1
+try Document.fetchOne(db, key: 1) // Document?
+```
+
+**When you want to build a request and plan to fetch from it later**, use a `filter` method:
+
+```swift
+let request = Player.filter(id: 1)
+let request = Country.filter(ids: ["FR", "US"])
+let request = Player.filter(key: ["email": "arthur@example.com"])
+let request = Citizenship.filter(key: ["citizenId": 1, "countryCode": "FR"])
+```
+
+
+## Testing for Record Existence
+
+**You can check if a request has matching rows in the database.**
+
+```swift
+// Some request based on `Player`
+let request = Player.filter(...)...
+
+// Check for player existence:
+let noSuchPlayer = try request.isEmpty(db) // Bool
+```
+
+You should check for emptiness instead of counting:
+
+```swift
+// Correct
+let noSuchPlayer = try request.fetchCount(db) == 0
+// Even better
+let noSuchPlayer = try request.isEmpty(db)
+```
+
+**You can also check if a given primary or unique key exists in the database.**
+
+[Identifiable Records] can use the type-safe method `exists(_:id:)`:
+
+```swift
+try Player.exists(db, id: 1)
+try Country.exists(db, id: "FR")
+```
+
+All record types can use `exists(_:key:)` that can check primary and unique keys:
+
+```swift
+try Player.exists(db, key: 1)
+try Country.exists(db, key: "FR")
+try Player.exists(db, key: ["email": "arthur@example.com"])
+try Citizenship.exists(db, key: ["citizenId": 1, "countryCode": "FR"])
+```
+
+You should check for key existence instead of fetching a record and checking for nil:
+
+```swift
+// Correct
+let playerExists = try Player.fetchOne(db, id: 1) != nil
+// Even better
+let playerExists = try Player.exists(db, id: 1)
+```
+
+
+## Fetching Aggregated Values
+
+**Requests can count.** The `fetchCount()` method returns the number of rows that would be returned by a fetch request:
+
+```swift
+// SELECT COUNT(*) FROM player
+let count = try Player.fetchCount(db) // Int
+
+// SELECT COUNT(*) FROM player WHERE email IS NOT NULL
+let count = try Player.filter(emailColumn != nil).fetchCount(db)
+
+// SELECT COUNT(DISTINCT name) FROM player
+let count = try Player.select(nameColumn).distinct().fetchCount(db)
+
+// SELECT COUNT(*) FROM (SELECT DISTINCT name, score FROM player)
+let count = try Player.select(nameColumn, scoreColumn).distinct().fetchCount(db)
+```
+
+
+**Other aggregated values** can also be selected and fetched (see [SQL Functions](#sql-functions)):
+
+```swift
+let request = Player.select(max(scoreColumn))
+let maxScore = try Int.fetchOne(db, request) // Int?
+
+let request = Player.select(min(scoreColumn), max(scoreColumn))
+let row = try Row.fetchOne(db, request)!     // Row
+let minScore = row[0] as Int?
+let maxScore = row[1] as Int?
+```
+
+
+## Delete Requests
+
+**Requests can delete records**, with the `deleteAll()` method:
+
+```swift
+// DELETE FROM player
+try Player.deleteAll(db)
+
+// DELETE FROM player WHERE team = 'red'
+try Player
+    .filter(teamColumn == "red")
+    .deleteAll(db)
+
+// DELETE FROM player ORDER BY score LIMIT 10
+try Player
+    .order(scoreColumn)
+    .limit(10)
+    .deleteAll(db)
+```
+
+> **Note** Deletion methods are available on types that adopt the [TableRecord] protocol, and `Table`:
+>
+> ```swift
+> struct Player: TableRecord { ... }
+> try Player.deleteAll(db)          // Fine
+> try Table("player").deleteAll(db) // Just as fine
+> ```
+
+**Deleting records according to their primary key** is a common task.
+
+[Identifiable Records] can use the type-safe methods `deleteOne(_:id:)` and `deleteAll(_:ids:)`:
+
+```swift
+try Player.deleteOne(db, id: 1)
+try Country.deleteAll(db, ids: ["FR", "US"])
+```
+
+All record types can use `deleteOne(_:key:)` and `deleteAll(_:keys:)` that apply conditions on primary and unique keys:
+
+```swift
+try Player.deleteOne(db, key: 1)
+try Country.deleteAll(db, keys: ["FR", "US"])
+try Player.deleteOne(db, key: ["email": "arthur@example.com"])
+try Citizenship.deleteOne(db, key: ["citizenId": 1, "countryCode": "FR"])
+```
+
+When the table has no explicit primary key, GRDB uses the [hidden `rowid` column](https://www.sqlite.org/rowidtable.html):
+
+```swift
+// DELETE FROM document WHERE rowid = 1
+try Document.deleteOne(db, key: 1)
+```
+
+
+## Update Requests
+
+**Requests can batch update records**. The `updateAll()` method accepts *column assignments* defined with the `set(to:)` method:
+
+```swift
+// UPDATE player SET score = 0, isHealthy = 1, bonus = NULL
+try Player.updateAll(db,
+    Column("score").set(to: 0),
+    Column("isHealthy").set(to: true),
+    Column("bonus").set(to: nil))
+
+// UPDATE player SET score = 0 WHERE team = 'red'
+try Player
+    .filter(Column("team") == "red")
+    .updateAll(db, Column("score").set(to: 0))
+
+// UPDATE player SET top = 1 ORDER BY score DESC LIMIT 10
+try Player
+    .order(Column("score").desc)
+    .limit(10)
+    .updateAll(db, Column("top").set(to: true))
+
+// UPDATE country SET population = 67848156 WHERE id = 'FR'
+try Country
+    .filter(id: "FR")
+    .updateAll(db, Column("population").set(to: 67_848_156))
+```
+
+Column assignments accept any expression:
+
+```swift
+// UPDATE player SET score = score + (bonus * 2)
+try Player.updateAll(db, Column("score").set(to: Column("score") + Column("bonus") * 2))
+```
+
+As a convenience, you can also use the `+=`, `-=`, `*=`, or `/=` operators:
+
+```swift
+// UPDATE player SET score = score + (bonus * 2)
+try Player.updateAll(db, Column("score") += Column("bonus") * 2)
+```
+
+Default [Conflict Resolution] rules apply, and you may also provide a specific one:
+
+```swift
+// UPDATE OR IGNORE player SET ...
+try Player.updateAll(db, onConflict: .ignore, /* assignments... */)
+```
+
+> **Note** The `updateAll` method is available on types that adopt the [TableRecord] protocol, and `Table`:
+>
+> ```swift
+> struct Player: TableRecord { ... }
+> try Player.updateAll(db, ...)          // Fine
+> try Table("player").updateAll(db, ...) // Just as fine
+> ```
+
+
+## Custom Requests
+
+Until now, we have seen [requests](#requests) created from any type that adopts the [TableRecord] protocol:
+
+```swift
+let request = Player.all() // QueryInterfaceRequest<Player>
+```
+
+Those requests of type `QueryInterfaceRequest` can fetch and count:
+
+```swift
+try request.fetchCursor(db) // A Cursor of Player
+try request.fetchAll(db)    // [Player]
+try request.fetchSet(db)    // Set<Player>
+try request.fetchOne(db)    // Player?
+try request.fetchCount(db)  // Int
+```
+
+**When the query interface can not generate the SQL you need**, you can still fall back to [raw SQL](#fetch-queries):
+
+```swift
+// Custom SQL is always welcome
+try Player.fetchAll(db, sql: "SELECT ...") // [Player]
+```
+
+But you may prefer to bring some elegance back in, and build custom requests:
+
+```swift
+// No custom SQL in sight
+try Player.customRequest().fetchAll(db) // [Player]
+```
+
+**To build custom requests**, you can use one of the built-in requests or derive requests from other requests.
+
+- [SQLRequest] is a fetch request built from raw SQL. For example:
+
+    ```swift
+    extension Player {
+        static func filter(color: Color) -> SQLRequest<Player> {
+            SQLRequest(
+                sql: "SELECT * FROM player WHERE color = ?",
+                arguments: [color])
+        }
+    }
+
+    // [Player]
+    try Player.filter(color: .red).fetchAll(db)
+    ```
+
+    SQLRequest supports [SQL Interpolation]:
+
+    ```swift
+    extension Player {
+        static func filter(color: Color) -> SQLRequest<Player> {
+            "SELECT * FROM player WHERE color = \(color)"
+        }
+    }
+    ```
+
+- The [`asRequest(of:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/queryinterfacerequest/asrequest(of:)) method changes the type fetched by the request. It is useful, for example, when you use [Associations]:
+
+    ```swift
+    struct BookInfo: FetchableRecord, Decodable {
+        var book: Book
+        var author: Author
+    }
+
+    let request = Book
+        .including(required: Book.author)
+        .asRequest(of: BookInfo.self)
+
+    // [BookInfo]
+    try request.fetchAll(db)
+    ```
+
+- The [`adapted(_:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/fetchrequest/adapted(_:)) method eases the consumption of complex rows with row adapters. See [`RowAdapter`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter) and [`splittingRowAdapters(columnCounts:)`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/splittingrowadapters(columncounts:)) for a sample code that uses `adapted(_:)`.
+
+
+Encryption
+==========
+
+**GRDB can encrypt your database with [SQLCipher](http://sqlcipher.net) v3.4+.**
+
+Use [CocoaPods](http://cocoapods.org/), and specify in your `Podfile`:
+
+```ruby
+# GRDB with SQLCipher 4
+pod 'GRDB.swift/SQLCipher'
+pod 'SQLCipher', '~> 4.0'
+
+# GRDB with SQLCipher 3
+pod 'GRDB.swift/SQLCipher'
+pod 'SQLCipher', '~> 3.4'
+```
+
+Make sure you remove any existing `pod 'GRDB.swift'` from your Podfile. `GRDB.swift/SQLCipher` must be the only active GRDB pod in your whole project, or you will face linker or runtime errors, due to the conflicts between SQLCipher and the system SQLite.
+ +- [Creating or Opening an Encrypted Database](#creating-or-opening-an-encrypted-database) +- [Changing the Passphrase of an Encrypted Database](#changing-the-passphrase-of-an-encrypted-database) +- [Exporting a Database to an Encrypted Database](#exporting-a-database-to-an-encrypted-database) +- [Security Considerations](#security-considerations) + + +### Creating or Opening an Encrypted Database + +**You create and open an encrypted database** by providing a passphrase to your [database connection]: + +```swift +var config = Configuration() +config.prepareDatabase { db in + try db.usePassphrase("secret") +} +let dbQueue = try DatabaseQueue(path: dbPath, configuration: config) +``` + +It is also in `prepareDatabase` that you perform other [SQLCipher configuration steps](https://www.zetetic.net/sqlcipher/sqlcipher-api/) that must happen early in the lifetime of a SQLCipher connection. For example: + +```swift +var config = Configuration() +config.prepareDatabase { db in + try db.usePassphrase("secret") + try db.execute(sql: "PRAGMA cipher_page_size = ...") + try db.execute(sql: "PRAGMA kdf_iter = ...") +} +let dbQueue = try DatabaseQueue(path: dbPath, configuration: config) +``` + +When you want to open an existing SQLCipher 3 database with SQLCipher 4, you may want to run the `cipher_compatibility` pragma: + +```swift +// Open an SQLCipher 3 database with SQLCipher 4 +var config = Configuration() +config.prepareDatabase { db in + try db.usePassphrase("secret") + try db.execute(sql: "PRAGMA cipher_compatibility = 3") +} +let dbQueue = try DatabaseQueue(path: dbPath, configuration: config) +``` + +See [SQLCipher 4.0.0 Release](https://www.zetetic.net/blog/2018/11/30/sqlcipher-400-release/) and [Upgrading to SQLCipher 4](https://discuss.zetetic.net/t/upgrading-to-sqlcipher-4/3283) for more information. + + +### Changing the Passphrase of an Encrypted Database + +**You can change the passphrase** of an already encrypted database. + +When you use a [database queue](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasequeue), open the database with the old passphrase, and then apply the new passphrase: + +```swift +try dbQueue.write { db in + try db.changePassphrase("newSecret") +} +``` + +When you use a [database pool](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool), make sure that no concurrent read can happen by changing the passphrase within the `barrierWriteWithoutTransaction` block. You must also ensure all future reads open a new database connection by calling the `invalidateReadOnlyConnections` method: + +```swift +try dbPool.barrierWriteWithoutTransaction { db in + try db.changePassphrase("newSecret") + dbPool.invalidateReadOnlyConnections() +} +``` + +> **Note**: When an application wants to keep on using a database queue or pool after the passphrase has changed, it is responsible for providing the correct passphrase to the `usePassphrase` method called in the database preparation function. 
Consider:
+>
+> ```swift
+> // WRONG: this won't work across a passphrase change
+> let passphrase = try getPassphrase()
+> var config = Configuration()
+> config.prepareDatabase { db in
+>     try db.usePassphrase(passphrase)
+> }
+>
+> // CORRECT: get the latest passphrase when it is needed
+> var config = Configuration()
+> config.prepareDatabase { db in
+>     let passphrase = try getPassphrase()
+>     try db.usePassphrase(passphrase)
+> }
+> ```
+
+> **Note**: The `DatabasePool.barrierWriteWithoutTransaction` method does not prevent [database snapshots](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasesnapshot) from accessing the database during the passphrase change, or after the new passphrase has been applied to the database. Those database accesses may throw errors. Applications should provide their own mechanism for invalidating open snapshots before the passphrase is changed.
+
+> **Note**: Instead of changing the passphrase "in place" as described here, you can also export the database into a new encrypted database that uses the new passphrase. See [Exporting a Database to an Encrypted Database](#exporting-a-database-to-an-encrypted-database).
+
+
+### Exporting a Database to an Encrypted Database
+
+Providing a passphrase won't encrypt a clear-text database that already exists, though. SQLCipher can't do that, and you will get an error instead: `SQLite error 26: file is encrypted or is not a database`.
+
+Instead, create a new encrypted database, at a distinct location, and export the content of the existing database into it. This works both for encrypting a clear-text database and for changing the passphrase of an encrypted database.
+
+The technique to do that is [documented](https://discuss.zetetic.net/t/how-to-encrypt-a-plaintext-sqlite-database-to-use-sqlcipher-and-avoid-file-is-encrypted-or-is-not-a-database-errors/868/1) by SQLCipher.
+
+With GRDB, it looks like this:
+
+```swift
+// The existing database
+let existingDBQueue = try DatabaseQueue(path: "/path/to/existing.db")
+
+// The new encrypted database, at some distinct location:
+var config = Configuration()
+config.prepareDatabase { db in
+    try db.usePassphrase("secret")
+}
+let newDBQueue = try DatabaseQueue(path: "/path/to/new.db", configuration: config)
+
+try existingDBQueue.inDatabase { db in
+    try db.execute(
+        sql: """
+            ATTACH DATABASE ? AS encrypted KEY ?;
+            SELECT sqlcipher_export('encrypted');
+            DETACH DATABASE encrypted;
+            """,
+        arguments: [newDBQueue.path, "secret"])
+}
+
+// Now the export is completed, and the existing database can be deleted.
+```
+
+
+### Security Considerations
+
+#### Managing the lifetime of the passphrase string
+
+It is recommended to avoid keeping the passphrase in memory longer than necessary. To do this, make sure you load the passphrase from the `prepareDatabase` method:
+
+```swift
+// NOT RECOMMENDED: this keeps the passphrase in memory longer than necessary
+let passphrase = try getPassphrase()
+var config = Configuration()
+config.prepareDatabase { db in
+    try db.usePassphrase(passphrase)
+}
+
+// RECOMMENDED: only load the passphrase when it is needed
+var config = Configuration()
+config.prepareDatabase { db in
+    let passphrase = try getPassphrase()
+    try db.usePassphrase(passphrase)
+}
+```
+
+This technique helps manage the lifetime of the passphrase, although keep in mind that the content of a String may remain intact in memory long after the object has been released.
+
+For even better control over the lifetime of the passphrase in memory, use a Data object, which natively provides the `resetBytes` function:
+
+```swift
+// RECOMMENDED: only load the passphrase when it is needed and reset its content immediately after use
+var config = Configuration()
+config.prepareDatabase { db in
+    var passphraseData = try getPassphraseData() // Data
+    defer {
+        passphraseData.resetBytes(in: 0..<passphraseData.count)
+    }
+    try db.usePassphrase(passphraseData)
+}
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+```
+
+
+## Backup
+
+**You can backup (copy) a database into another database**:
+
+```swift
+let source: DatabaseQueue = ...      // or DatabasePool
+let destination: DatabaseQueue = ... // or DatabasePool
+try source.backup(to: destination)
+```
+
+> **Warning**: Passing non-default values of `pagesPerStep` or `progress` to the backup methods is an advanced API intended to provide additional capabilities to expert users. GRDB's backup API provides a faithful, low-level wrapper to the underlying SQLite online backup API. GRDB's documentation is not a comprehensive substitute for the official SQLite [documentation of their backup API](https://www.sqlite.org/c3ref/backup_finish.html).
+
+## Interrupt a Database
+
+**The `interrupt()` method** causes any pending database operation to abort and return at its earliest opportunity.
+
+It can be called from any thread.
+
+```swift
+dbQueue.interrupt()
+dbPool.interrupt()
+```
+
+A call to `interrupt()` that occurs when there are no running SQL statements is a no-op and has no effect on SQL statements that are started after `interrupt()` returns.
+
+A database operation that is interrupted will throw a DatabaseError with code `SQLITE_INTERRUPT`. If the interrupted SQL operation is an INSERT, UPDATE, or DELETE that is inside an explicit transaction, then the entire transaction will be rolled back automatically. If the rolled back transaction was started by a transaction-wrapping method such as `DatabaseWriter.write` or `Database.inTransaction`, then all database accesses will throw a DatabaseError with code `SQLITE_ABORT` until the wrapping method returns.
+
+For example:
+
+```swift
+try dbQueue.write { db in
+    try Player(...).insert(db) // throws SQLITE_INTERRUPT
+    try Player(...).insert(db) // not executed
+} // throws SQLITE_INTERRUPT
+
+try dbQueue.write { db in
+    do {
+        try Player(...).insert(db) // throws SQLITE_INTERRUPT
+    } catch { }
+} // throws SQLITE_ABORT
+
+try dbQueue.write { db in
+    do {
+        try Player(...).insert(db) // throws SQLITE_INTERRUPT
+    } catch { }
+    try Player(...).insert(db) // throws SQLITE_ABORT
+} // throws SQLITE_ABORT
+```
+
+You can catch both `SQLITE_INTERRUPT` and `SQLITE_ABORT` errors:
+
+```swift
+do {
+    try dbPool.write { db in ... }
+} catch DatabaseError.SQLITE_INTERRUPT, DatabaseError.SQLITE_ABORT {
+    // Oops, the database was interrupted.
+}
+```
+
+For more information, see [Interrupt A Long-Running Query](https://www.sqlite.org/c3ref/interrupt.html).
+
+
+## Avoiding SQL Injection
+
+SQL injection is a technique that lets an attacker nuke your database.
+
+> ![XKCD: Exploits of a Mom](https://imgs.xkcd.com/comics/exploits_of_a_mom.png)
+>
+> https://xkcd.com/327/
+
+Here is an example of code that is vulnerable to SQL injection:
+
+```swift
+// BAD BAD BAD
+let id = 1
+let name = textField.text
+try dbQueue.write { db in
+    try db.execute(sql: "UPDATE students SET name = '\(name)' WHERE id = \(id)")
+}
+```
+
+If the user enters a funny string like `Robert'; DROP TABLE students; --`, SQLite will see the following SQL, and drop your database table instead of updating a name as intended:
+
+```sql
+UPDATE students SET name = 'Robert';
+DROP TABLE students;
+--' WHERE id = 1
+```
+
+To avoid those problems, **never embed raw values in your SQL queries**. The only correct technique is to provide [arguments](#executing-updates) to your raw SQL queries:
+
+```swift
+let name = textField.text
+try dbQueue.write { db in
+    // Good
+    try db.execute(
+        sql: "UPDATE students SET name = ? WHERE id = ?",
+        arguments: [name, id])
+
+    // Just as good
+    try db.execute(
+        sql: "UPDATE students SET name = :name WHERE id = :id",
+        arguments: ["name": name, "id": id])
+}
+```
+
+When you use [records](#records) and the [query interface](#the-query-interface), GRDB always prevents SQL injection for you:
+
+```swift
+let id = 1
+let name = textField.text
+try dbQueue.write { db in
+    if var student = try Student.fetchOne(db, id: id) {
+        student.name = name
+        try student.update(db)
+    }
+}
+```
+
+
+## Error Handling
+
+GRDB can throw [DatabaseError](#databaseerror), [RecordError], or crash your program with a [fatal error](#fatal-errors).
+
+Considering that a local database is not some JSON loaded from a remote server, GRDB focuses on **trusted databases**. Dealing with [untrusted databases](#how-to-deal-with-untrusted-inputs) requires extra care.
+
+- [DatabaseError](#databaseerror)
+- [RecordError]
+- [Fatal Errors](#fatal-errors)
+- [How to Deal with Untrusted Inputs](#how-to-deal-with-untrusted-inputs)
+- [Error Log](#error-log)
+
+
+### DatabaseError
+
+📖 [`DatabaseError`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseerror)
+
+**DatabaseError** is thrown on SQLite errors:
+
+```swift
+do {
+    try Pet(masterId: 1, name: "Bobby").insert(db)
+} catch let error as DatabaseError {
+    // The SQLite error code: 19 (SQLITE_CONSTRAINT)
+    error.resultCode
+
+    // The extended error code: 787 (SQLITE_CONSTRAINT_FOREIGNKEY)
+    error.extendedResultCode
+
+    // The eventual SQLite message: FOREIGN KEY constraint failed
+    error.message
+
+    // The eventual erroneous SQL query
+    // "INSERT INTO pet (masterId, name) VALUES (?, ?)"
+    error.sql
+
+    // The eventual SQL arguments
+    // [1, "Bobby"]
+    error.arguments
+
+    // Full error description
+    // > SQLite error 19: FOREIGN KEY constraint failed -
+    // > while executing `INSERT INTO pet (masterId, name) VALUES (?, ?)`
+    error.description
+}
+```
+
+If you want to see statement arguments in the error description, [make statement arguments public](https://swiftpackageindex.com/groue/grdb.swift/configuration/publicstatementarguments).
+
+**SQLite uses [result codes](https://www.sqlite.org/rescode.html) to distinguish between various errors**.
+
+You can catch a DatabaseError and match on result codes:
+
+```swift
+do {
+    try ...
+} catch let error as DatabaseError {
+    switch error {
+    case DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY:
+        // foreign key constraint error
+    case DatabaseError.SQLITE_CONSTRAINT:
+        // any other constraint error
+    default:
+        // any other database error
+    }
+}
+```
+
+You can also directly match errors on result codes:
+
+```swift
+do {
+    try ...
+} catch DatabaseError.SQLITE_CONSTRAINT_FOREIGNKEY {
+    // foreign key constraint error
+} catch DatabaseError.SQLITE_CONSTRAINT {
+    // any other constraint error
+} catch {
+    // any other database error
+}
+```
+
+Each DatabaseError has two codes: an `extendedResultCode` (see [extended result code](https://www.sqlite.org/rescode.html#extended_result_code_list)), and a less precise `resultCode` (see [primary result code](https://www.sqlite.org/rescode.html#primary_result_code_list)). Extended result codes are refinements of primary result codes, as `SQLITE_CONSTRAINT_FOREIGNKEY` is to `SQLITE_CONSTRAINT`, for example.
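+
+If you prefer comparing codes explicitly rather than relying on pattern matching, you can read both codes off the error. Here is a quick sketch, reusing the `pet` table from the example above, with `where` clauses ordered from the most precise code to the least precise one:
+
+```swift
+do {
+    try Pet(masterId: 1, name: "Bobby").insert(db)
+} catch let error as DatabaseError where error.extendedResultCode == .SQLITE_CONSTRAINT_FOREIGNKEY {
+    // Precise handling of foreign key violations
+} catch let error as DatabaseError where error.resultCode == .SQLITE_CONSTRAINT {
+    // Coarser handling of all other constraint violations
+}
+```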
+ +> **Warning**: SQLite has progressively introduced extended result codes across its versions. The [SQLite release notes](http://www.sqlite.org/changes.html) are unfortunately not quite clear about that: write your handling of extended result codes with care. + + +### RecordError + +📖 [`RecordError`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recorderror) + +**RecordError** is thrown by the [PersistableRecord] protocol when the `update` method could not find any row to update: + +```swift +do { + try player.update(db) +} catch let RecordError.recordNotFound(databaseTableName: table, key: key) { + print("Key \(key) was not found in table \(table).") +} +``` + +**RecordError** is also thrown by the [FetchableRecord] protocol when the `find` method does not find any record: + +```swift +do { + let player = try Player.find(db, id: 42) +} catch let RecordError.recordNotFound(databaseTableName: table, key: key) { + print("Key \(key) was not found in table \(table).") +} +``` + + +### Fatal Errors + +**Fatal errors notify that the program, or the database, has to be changed.** + +They uncover programmer errors, false assumptions, and prevent misuses. Here are a few examples: + +- **The code asks for a non-optional value, when the database contains NULL:** + + ```swift + // fatal error: could not convert NULL to String. + let name: String = row["name"] + ``` + + Solution: fix the contents of the database, use [NOT NULL constraints](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/columndefinition/notnull(onconflict:)), or load an optional: + + ```swift + let name: String? = row["name"] + ``` + +- **Conversion from database value to Swift type fails:** + + ```swift + // fatal error: could not convert "Mom’s birthday" to Date. + let date: Date = row["date"] + + // fatal error: could not convert "" to URL. + let url: URL = row["url"] + ``` + + Solution: fix the contents of the database, or use [DatabaseValue](#databasevalue) to handle all possible cases: + + ```swift + let dbValue: DatabaseValue = row["date"] + if dbValue.isNull { + // Handle NULL + } else if let date = Date.fromDatabaseValue(dbValue) { + // Handle valid date + } else { + // Handle invalid date + } + ``` + +- **The database can't guarantee that the code does what it says:** + + ```swift + // fatal error: table player has no unique index on column email + try Player.deleteOne(db, key: ["email": "arthur@example.com"]) + ``` + + Solution: add a unique index to the player.email column, or use the `deleteAll` method to make it clear that you may delete more than one row: + + ```swift + try Player.filter(Column("email") == "arthur@example.com").deleteAll(db) + ``` + +- **Database connections are not reentrant:** + + ```swift + // fatal error: Database methods are not reentrant. + dbQueue.write { db in + dbQueue.write { db in + ... + } + } + ``` + + Solution: avoid reentrancy, and instead pass a database connection along. + + +### How to Deal with Untrusted Inputs + +Let's consider the code below: + +```swift +let sql = "SELECT ..." + +// Some untrusted arguments for the query +let arguments: [String: Any] = ... +let rows = try Row.fetchCursor(db, sql: sql, arguments: StatementArguments(arguments)) + +while let row = try rows.next() { + // Some untrusted database value: + let date: Date? 
= row[0]
+}
+```
+
+It has two opportunities to throw fatal errors:
+
+- **Untrusted arguments**: The dictionary may contain values that do not conform to the [DatabaseValueConvertible protocol](#values), or may miss keys required by the statement.
+- **Untrusted database content**: The row may contain a non-null value that can't be turned into a date.
+
+In such a situation, you can still avoid fatal errors by exposing and handling each failure point, one level down in the GRDB API:
+
+```swift
+// Untrusted arguments
+if let arguments = StatementArguments(arguments) {
+    let statement = try db.makeStatement(sql: sql)
+    try statement.setArguments(arguments)
+
+    let cursor = try Row.fetchCursor(statement)
+    while let row = try cursor.next() {
+        // Untrusted database content
+        let dbValue: DatabaseValue = row[0]
+        if dbValue.isNull {
+            // Handle NULL
+        } else if let date = Date.fromDatabaseValue(dbValue) {
+            // Handle valid date
+        } else {
+            // Handle invalid date
+        }
+    }
+}
+```
+
+See [`Statement`] and [DatabaseValue](#databasevalue) for more information.
+
+
+### Error Log
+
+**SQLite can be configured to invoke a callback function containing an error code and a terse error message whenever anomalies occur.**
+
+This global error callback must be configured early in the lifetime of your application:
+
+```swift
+Database.logError = { (resultCode, message) in
+    NSLog("%@", "SQLite error \(resultCode): \(message)")
+}
+```
+
+> **Warning**: Database.logError must be set before any database connection is opened. This includes the connections that your application opens with GRDB, but also connections opened by other tools, such as third-party libraries. Setting it after a connection has been opened is an SQLite misuse, and has no effect.
+
+See [The Error And Warning Log](https://sqlite.org/errlog.html) for more information.
+
+
+## Unicode
+
+SQLite lets you store unicode strings in the database.
+
+However, SQLite does not provide any unicode-aware string transformations or comparisons.
+
+
+### Unicode functions
+
+The `UPPER` and `LOWER` built-in SQLite functions are not unicode-aware:
+
+```swift
+// "JéRôME"
+try String.fetchOne(db, sql: "SELECT UPPER('Jérôme')")
+```
+
+GRDB extends SQLite with [SQL functions](#custom-sql-functions-and-aggregates) that call the Swift built-in string functions `capitalized`, `lowercased`, `uppercased`, `localizedCapitalized`, `localizedLowercased` and `localizedUppercased`:
+
+```swift
+// "JÉRÔME"
+let uppercased = DatabaseFunction.uppercase
+try String.fetchOne(db, sql: "SELECT \(uppercased.name)('Jérôme')")
+```
+
+Those unicode-aware string functions are also readily available in the [query interface](#sql-functions):
+
+```swift
+Player.select(nameColumn.uppercased)
+```
+
+
+### String Comparison
+
+SQLite compares strings on many occasions: when you sort rows according to a string column, or when you use a comparison operator such as `=` or `<=`.
+
+The comparison result comes from a *collating function*, or *collation*. SQLite comes with three built-in collations that do not support Unicode: [binary, nocase, and rtrim](https://www.sqlite.org/datatype3.html#collation).
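+
+For example, here is a minimal sketch, assuming a `dbQueue` connection, that shows the ASCII-only case folding performed by the built-in `nocase` collation:
+
+```swift
+try dbQueue.read { db in
+    // true: plain ASCII letters compare case-insensitively
+    try Bool.fetchOne(db, sql: "SELECT 'JEROME' = 'jerome' COLLATE NOCASE")
+
+    // false: the accented 'É' and 'é' are not folded by NOCASE
+    try Bool.fetchOne(db, sql: "SELECT 'JÉRÔME' = 'jérôme' COLLATE NOCASE")
+}
+```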
+
+GRDB comes with five extra collations that leverage unicode-aware comparisons based on the standard Swift String comparison functions and operators:
+
+- `unicodeCompare` (uses the built-in `<=` and `==` Swift operators)
+- `caseInsensitiveCompare`
+- `localizedCaseInsensitiveCompare`
+- `localizedCompare`
+- `localizedStandardCompare`
+
+A collation can be applied to a table column. All comparisons involving this column will then automatically trigger the comparison function:
+
+```swift
+try db.create(table: "player") { t in
+    // Guarantees case-insensitive email uniqueness
+    t.column("email", .text).unique().collate(.nocase)
+
+    // Sort names in a localized case-insensitive way
+    t.column("name", .text).collate(.localizedCaseInsensitiveCompare)
+}
+
+// Players are sorted in a localized case-insensitive way:
+let players = try Player.order(nameColumn).fetchAll(db)
+```
+
+> **Warning**: SQLite *requires* host applications to provide the definition of any collation other than binary, nocase and rtrim. When a database file has to be shared or migrated to another SQLite library or platform (such as the Android version of your application), make sure you provide a compatible collation.
+
+If you can't or don't want to define the comparison behavior of a column (see warning above), you can still use an explicit collation in SQL requests and in the [query interface](#the-query-interface):
+
+```swift
+let collation = DatabaseCollation.localizedCaseInsensitiveCompare
+let players = try Player.fetchAll(db,
+    sql: "SELECT * FROM player ORDER BY name COLLATE \(collation.name)")
+let players = try Player.order(nameColumn.collating(collation)).fetchAll(db)
+```
+
+
+**You can also define your own collations**:
+
+```swift
+let collation = DatabaseCollation("customCollation") { (lhs, rhs) -> ComparisonResult in
+    // return the comparison of lhs and rhs strings.
+}
+
+// Make the collation available to a database connection
+var config = Configuration()
+config.prepareDatabase { db in
+    db.add(collation: collation)
+}
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+```
+
+
+
+## Memory Management
+
+Both SQLite and GRDB use non-essential memory that helps them perform better.
+
+You can reclaim this memory with the `releaseMemory` method:
+
+```swift
+// Release as much memory as possible.
+dbQueue.releaseMemory()
+dbPool.releaseMemory()
+```
+
+This method blocks the current thread until all current database accesses are completed, and the memory has been collected.
+
+> **Warning**: If `DatabasePool.releaseMemory()` is called while a long read is performed concurrently, then no other read access will be possible until this long read has completed, and the memory has been released. If this does not suit your application needs, look for the asynchronous options below.
+
+You can release memory in an asynchronous way as well:
+
+```swift
+// On a DatabaseQueue
+dbQueue.asyncWriteWithoutTransaction { db in
+    db.releaseMemory()
+}
+
+// On a DatabasePool
+dbPool.releaseMemoryEventually()
+```
+
+`DatabasePool.releaseMemoryEventually()` does not block the current thread, and does not prevent concurrent database accesses. In exchange for this convenience, you don't know when memory has been freed.
+ + +### Memory Management on iOS + +**The iOS operating system likes applications that do not consume much memory.** + +[Database queues] and [pools](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool) automatically free non-essential memory when the application receives a memory warning, and when the application enters background. + +You can opt out of this automatic memory management: + +```swift +var config = Configuration() +config.automaticMemoryManagement = false +let dbQueue = try DatabaseQueue(path: dbPath, configuration: config) // or DatabasePool +``` + +FAQ +=== + +**[FAQ: Opening Connections](#faq-opening-connections)** + +- [How do I create a database in my application?](#how-do-i-create-a-database-in-my-application) +- [How do I open a database stored as a resource of my application?](#how-do-i-open-a-database-stored-as-a-resource-of-my-application) +- [How do I close a database connection?](#how-do-i-close-a-database-connection) + +**[FAQ: SQL](#faq-sql)** + +- [How do I print a request as SQL?](#how-do-i-print-a-request-as-sql) + +**[FAQ: General](#faq-general)** + +- [How do I monitor the duration of database statements execution?](#how-do-i-monitor-the-duration-of-database-statements-execution) +- [What Are Experimental Features?](#what-are-experimental-features) +- [Does GRDB support library evolution and ABI stability?](#does-grdb-support-library-evolution-and-abi-stability) + +**[FAQ: Associations](#faq-associations)** + +- [How do I filter records and only keep those that are associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-associated-to-another-record) +- [How do I filter records and only keep those that are NOT associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-not-associated-to-another-record) +- [How do I select only one column of an associated record?](#how-do-i-select-only-one-column-of-an-associated-record) + +**[FAQ: ValueObservation](#faq-valueobservation)** + +- [Why is ValueObservation not publishing value changes?](#why-is-valueobservation-not-publishing-value-changes) + +**[FAQ: Errors](#faq-errors)** + +- [Generic parameter 'T' could not be inferred](#generic-parameter-t-could-not-be-inferred) +- [Mutation of captured var in concurrently-executing code](#mutation-of-captured-var-in-concurrently-executing-code) +- [SQLite error 1 "no such column"](#sqlite-error-1-no-such-column) +- [SQLite error 10 "disk I/O error", SQLite error 23 "not authorized"](#sqlite-error-10-disk-io-error-sqlite-error-23-not-authorized) +- [SQLite error 21 "wrong number of statement arguments" with LIKE queries](#sqlite-error-21-wrong-number-of-statement-arguments-with-like-queries) + + +## FAQ: Opening Connections + +- :arrow_up: [FAQ] +- [How do I create a database in my application?](#how-do-i-create-a-database-in-my-application) +- [How do I open a database stored as a resource of my application?](#how-do-i-open-a-database-stored-as-a-resource-of-my-application) +- [How do I close a database connection?](#how-do-i-close-a-database-connection) + +### How do I create a database in my application? + +First choose a proper location for the database file. Document-based applications will let the user pick a location. Apps that use the database as a global storage will prefer the Application Support directory. 
+
+The sample code below creates or opens a database file inside its dedicated directory (a [recommended practice](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseconnections)). On the first run, a new empty database file is created. On subsequent runs, the database file already exists, so it just opens a connection:
+
+```swift
+// HOW TO create an empty database, or open an existing database file
+
+// Create the "Application Support/MyDatabase" directory
+let fileManager = FileManager.default
+let appSupportURL = try fileManager.url(
+    for: .applicationSupportDirectory, in: .userDomainMask,
+    appropriateFor: nil, create: true)
+let directoryURL = appSupportURL.appendingPathComponent("MyDatabase", isDirectory: true)
+try fileManager.createDirectory(at: directoryURL, withIntermediateDirectories: true)
+
+// Open or create the database
+let databaseURL = directoryURL.appendingPathComponent("db.sqlite")
+let dbQueue = try DatabaseQueue(path: databaseURL.path)
+```
+
+### How do I open a database stored as a resource of my application?
+
+Open a read-only connection to your resource:
+
+```swift
+// HOW TO open a read-only connection to a database resource
+
+// Get the path to the database resource.
+let dbPath = Bundle.main.path(forResource: "db", ofType: "sqlite")
+
+if let dbPath {
+    // If the resource exists, open a read-only connection.
+    // Writes are disallowed because resources can not be modified.
+    var config = Configuration()
+    config.readonly = true
+    let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+} else {
+    // The database resource can not be found.
+    // Fix your setup, or report the problem to the user.
+}
+```
+
+### How do I close a database connection?
+
+Database connections are automatically closed when `DatabaseQueue` or `DatabasePool` instances are deinitialized.
+
+If the correct execution of your program depends on precise database closing, perform an explicit call to [`close()`](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasereader/close()). This method may fail and create zombie connections, so please check its detailed documentation.
+
+## FAQ: SQL
+
+- :arrow_up: [FAQ]
+- [How do I print a request as SQL?](#how-do-i-print-a-request-as-sql)
+
+### How do I print a request as SQL?
+
+When you want to debug a request that does not deliver the expected results, you may want to print the SQL that is actually executed.
+
+You can compile the request into a prepared [`Statement`]:
+
+```swift
+try dbQueue.read { db in
+    let request = Player.filter(Column("email") == "arthur@example.com")
+    let statement = try request.makePreparedRequest(db).statement
+    print(statement) // SELECT * FROM player WHERE email = ?
+    print(statement.arguments) // ["arthur@example.com"]
+}
+```
+
+Another option is to set up a tracing function that prints out the executed SQL requests. For example, provide a tracing function when you connect to the database:
+
+```swift
+// Prints all SQL statements
+var config = Configuration()
+config.prepareDatabase { db in
+    db.trace { print($0) }
+}
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+
+try dbQueue.read { db in
+    // Prints "SELECT * FROM player WHERE email = ?"
+    let players = try Player.filter(Column("email") == "arthur@example.com").fetchAll(db)
+}
+```
+
+If you want to see statement arguments such as `'arthur@example.com'` in the logged statements, [make statement arguments public](https://swiftpackageindex.com/groue/grdb.swift/configuration/publicstatementarguments).
+
+> **Note**: the generated SQL may change between GRDB releases, without notice: don't have your application rely on any specific SQL output.
+
+
+## FAQ: General
+
+- :arrow_up: [FAQ]
+- [How do I monitor the duration of database statements execution?](#how-do-i-monitor-the-duration-of-database-statements-execution)
+- [What Are Experimental Features?](#what-are-experimental-features)
+- [Does GRDB support library evolution and ABI stability?](#does-grdb-support-library-evolution-and-abi-stability)
+
+### How do I monitor the duration of database statements execution?
+
+Use the `trace(options:_:)` method, with the `.profile` option:
+
+```swift
+var config = Configuration()
+config.prepareDatabase { db in
+    db.trace(options: .profile) { event in
+        // Prints all SQL statements with their duration
+        print(event)
+
+        // Access to detailed profiling information
+        if case let .profile(statement, duration) = event, duration > 0.5 {
+            print("Slow query: \(statement.sql)")
+        }
+    }
+}
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+
+try dbQueue.read { db in
+    let players = try Player.filter(Column("email") == "arthur@example.com").fetchAll(db)
+    // Prints "0.003s SELECT * FROM player WHERE email = ?"
+}
+```
+
+If you want to see statement arguments such as `'arthur@example.com'` in the logged statements, [make statement arguments public](https://swiftpackageindex.com/groue/grdb.swift/configuration/publicstatementarguments).
+
+### What Are Experimental Features?
+
+Since GRDB 1.0, all backwards compatibility guarantees of [semantic versioning](http://semver.org) apply: no breaking change will happen until the next major version of the library.
+
+There is an exception, though: *experimental features*, marked with the "**:fire: EXPERIMENTAL**" badge. Those are advanced features that are too young, or lack user feedback. They are not stabilized yet.
+
+Those experimental features are not protected by semantic versioning, and may break between two minor releases of the library. To help them become stable, [your feedback](https://github.com/groue/GRDB.swift/issues) is greatly appreciated.
+
+### Does GRDB support library evolution and ABI stability?
+
+No, GRDB does not support library evolution and ABI stability. The only promise is API stability according to [semantic versioning](http://semver.org), with an exception for [experimental features](#what-are-experimental-features).
+
+Yet, GRDB can be built with the "Build Libraries for Distribution" Xcode option (`BUILD_LIBRARY_FOR_DISTRIBUTION`), so that you can build binary frameworks at your convenience.
+
+## FAQ: Associations
+
+- :arrow_up: [FAQ]
+- [How do I filter records and only keep those that are associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-associated-to-another-record)
+- [How do I filter records and only keep those that are NOT associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-not-associated-to-another-record)
+- [How do I select only one column of an associated record?](#how-do-i-select-only-one-column-of-an-associated-record)
+
+### How do I filter records and only keep those that are associated to another record?
+ +Let's say you have two record types, `Book` and `Author`, and you want to only fetch books that have an author, and discard anonymous books. + +We start by defining the association between books and authors: + +```swift +struct Book: TableRecord { + ... + static let author = belongsTo(Author.self) +} + +struct Author: TableRecord { + ... +} +``` + +And then we can write our request and only fetch books that have an author, discarding anonymous ones: + +```swift +let books: [Book] = try dbQueue.read { db in + // SELECT book.* FROM book + // JOIN author ON author.id = book.authorID + let request = Book.joining(required: Book.author) + return try request.fetchAll(db) +} +``` + +Note how this request does not use the `filter` method. Indeed, we don't have any condition to express on any column. Instead, we just need to "require that a book can be joined to its author". + +See [How do I filter records and only keep those that are NOT associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-not-associated-to-another-record) below for the opposite question. + + +### How do I filter records and only keep those that are NOT associated to another record? + +Let's say you have two record types, `Book` and `Author`, and you want to only fetch anonymous books that do not have any author. + +We start by defining the association between books and authors: + +```swift +struct Book: TableRecord { + ... + static let author = belongsTo(Author.self) +} + +struct Author: TableRecord { + ... +} +``` + +And then we can write our request and only fetch anonymous books that don't have any author: + +```swift +let books: [Book] = try dbQueue.read { db in + // SELECT book.* FROM book + // LEFT JOIN author ON author.id = book.authorID + // WHERE author.id IS NULL + let authorAlias = TableAlias() + let request = Book + .joining(optional: Book.author.aliased(authorAlias)) + .filter(!authorAlias.exists) + return try request.fetchAll(db) +} +``` + +This request uses a TableAlias in order to be able to filter on the eventual associated author. We make sure that the `Author.primaryKey` is nil, which is another way to say it does not exist: the book has no author. + +See [How do I filter records and only keep those that are associated to another record?](#how-do-i-filter-records-and-only-keep-those-that-are-associated-to-another-record) above for the opposite question. + + +### How do I select only one column of an associated record? + +Let's say you have two record types, `Book` and `Author`, and you want to fetch all books with their author name, but not the full associated author records. + +We start by defining the association between books and authors: + +```swift +struct Book: Decodable, TableRecord { + ... + static let author = belongsTo(Author.self) +} + +struct Author: Decodable, TableRecord { + ... + enum Columns { + static let name = Column(CodingKeys.name) + } +} +``` + +And then we can write our request and the ad-hoc record that decodes it: + +```swift +struct BookInfo: Decodable, FetchableRecord { + var book: Book + var authorName: String? 
// nil when the book is anonymous
+
+    static func all() -> QueryInterfaceRequest<BookInfo> {
+        // SELECT book.*, author.name AS authorName
+        // FROM book
+        // LEFT JOIN author ON author.id = book.authorID
+        let authorName = Author.Columns.name.forKey(CodingKeys.authorName)
+        return Book
+            .annotated(withOptional: Book.author.select(authorName))
+            .asRequest(of: BookInfo.self)
+    }
+}
+
+let bookInfos: [BookInfo] = try dbQueue.read { db in
+    try BookInfo.all().fetchAll(db)
+}
+```
+
+By defining the request as a static method of BookInfo, you have access to the private `CodingKeys.authorName`, and a compiler-checked SQL column name.
+
+By using the `annotated(withOptional:)` method, you append the author name to the top-level selection that can be decoded by the ad-hoc record.
+
+By using `asRequest(of:)`, you enhance the type-safety of your request.
+
+
+## FAQ: ValueObservation
+
+- :arrow_up: [FAQ]
+- [Why is ValueObservation not publishing value changes?](#why-is-valueobservation-not-publishing-value-changes)
+
+### Why is ValueObservation not publishing value changes?
+
+Sometimes it seems that a [ValueObservation] does not notify the changes you expect.
+
+There are four possible reasons for this:
+
+1. The expected changes were not committed into the database.
+2. The expected changes were committed into the database, but were quickly overwritten.
+3. The observation was stopped.
+4. The observation does not track the expected database region.
+
+To investigate the first two reasons, look at the SQL statements executed by the database. Set up tracing when you open the database connection:
+
+```swift
+// Prints all SQL statements
+var config = Configuration()
+config.prepareDatabase { db in
+    db.trace { print("SQL: \($0)") }
+}
+let dbQueue = try DatabaseQueue(path: dbPath, configuration: config)
+```
+
+If, after that, you are convinced that the expected changes were committed into the database, and were not overwritten soon after, trace observation events:
+
+```swift
+let observation = ValueObservation
+    .tracking { db in ... }
+    .print() // <- trace observation events
+let cancellable = observation.start(...)
+```
+
+Look at the observation logs which start with `cancel` or `failure`: maybe the observation was cancelled by your app, or did fail with an error.
+
+Look at the observation logs which start with `value`: maybe the expected value was actually notified, and then quickly overwritten.
+
+Finally, look at the observation logs which start with `tracked region`. Does the printed database region cover the expected changes?
+
+For example:
+
+- `empty`: The empty region, which tracks nothing and never triggers the observation.
+- `player(*)`: The full `player` table
+- `player(id,name)`: The `id` and `name` columns of the `player` table
+- `player(id,name)[1]`: The `id` and `name` columns of the row with id 1 in the `player` table
+- `player(*),team(*)`: Both the full `player` and `team` tables
+
+If you happen to use the `ValueObservation.trackingConstantRegion(_:)` method and see a mismatch between the tracked region and your expectation, then change the definition of your observation by using `tracking(_:)`, as in the sketch below. You should witness that the logs which start with `tracked region` now evolve in order to include the expected changes, and that you get the expected notifications.
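+
+For example, here is a minimal sketch of that change, assuming the observation fetches players:
+
+```swift
+// BEFORE: the tracked region is determined once and for all by the first fetch
+let observation = ValueObservation.trackingConstantRegion { db in
+    try Player.fetchAll(db)
+}
+
+// AFTER: the tracked region is re-evaluated on each fetch
+let observation = ValueObservation
+    .tracking { db in try Player.fetchAll(db) }
+    .print() // keep tracing observation events while you debug
+```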
+
+If after all those steps (thank you!), your observation is still failing you, please [open an issue](https://github.com/groue/GRDB.swift/issues/new) and provide a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example)!
+
+
+## FAQ: Errors
+
+- :arrow_up: [FAQ]
+- [Generic parameter 'T' could not be inferred](#generic-parameter-t-could-not-be-inferred)
+- [Mutation of captured var in concurrently-executing code](#mutation-of-captured-var-in-concurrently-executing-code)
+- [SQLite error 1 "no such column"](#sqlite-error-1-no-such-column)
+- [SQLite error 10 "disk I/O error", SQLite error 23 "not authorized"](#sqlite-error-10-disk-io-error-sqlite-error-23-not-authorized)
+- [SQLite error 21 "wrong number of statement arguments" with LIKE queries](#sqlite-error-21-wrong-number-of-statement-arguments-with-like-queries)
+
+### Generic parameter 'T' could not be inferred
+
+You may get this error when using the `read` and `write` methods of database queues and pools:
+
+```swift
+// Generic parameter 'T' could not be inferred
+let string = try dbQueue.read { db in
+    let result = try String.fetchOne(db, ...)
+    return result
+}
+```
+
+This is a limitation of the Swift compiler.
+
+The general workaround is to explicitly declare the type of the closure result:
+
+```swift
+// General Workaround
+let string = try dbQueue.read { db -> String? in
+    let result = try String.fetchOne(db, ...)
+    return result
+}
+```
+
+You can also, when possible, write a single-line closure:
+
+```swift
+// Single-line closure workaround:
+let string = try dbQueue.read { db in
+    try String.fetchOne(db, ...)
+}
+```
+
+
+### Mutation of captured var in concurrently-executing code
+
+The `insert` and `save` [persistence methods](#persistablerecord-protocol) can trigger a compiler error in async contexts:
+
+```swift
+var player = Player(id: nil, name: "Arthur")
+try await dbWriter.write { db in
+    // Error: Mutation of captured var 'player' in concurrently-executing code
+    try player.insert(db)
+}
+print(player.id) // A non-nil id
+```
+
+When this happens, prefer the `inserted` and `saved` methods instead:
+
+```swift
+// OK
+var player = Player(id: nil, name: "Arthur")
+player = try await dbWriter.write { [player] db in
+    return try player.inserted(db)
+}
+print(player.id) // A non-nil id
+```
+
+
+### SQLite error 1 "no such column"
+
+This error message is self-explanatory: do check for misspelled or non-existing column names.
+
+However, sometimes this error only happens when an app runs on a recent operating system (iOS 14+, Big Sur+, etc.). The error does not happen with previous ones.
+
+When this is the case, there are two possible explanations:
+
+1. Maybe a column name is *really* misspelled or missing from the database schema.
+
+    To find it, check the SQL statement that comes with the [DatabaseError](#databaseerror).
+
+2. Maybe the application is using the character `"` instead of the single quote `'` as the delimiter for string literals in raw SQL queries. Recent versions of SQLite have learned to complain about this deviation from the SQL standard, and this is why you are seeing this error.
+
+    For example, this is not standard SQL: `UPDATE player SET name = "Arthur"`.
+
+    The standard version is: `UPDATE player SET name = 'Arthur'`.
+
+    It just happens that old versions of SQLite used to accept the former, non-standard version. Newer versions are able to reject it with an error.
+
+    The fix is to change the SQL statements run by the application: replace `"` with `'` in your string literals.
+
+    It may also be time to learn about statement arguments and [SQL injection](#avoiding-sql-injection):
+
+    ```swift
+    let name: String = ...
+
+    // NOT STANDARD (double quote)
+    try db.execute(sql: """
+        UPDATE player SET name = "\(name)"
+        """)
+
+    // STANDARD, BUT STILL NOT RECOMMENDED (single quote)
+    try db.execute(sql: "UPDATE player SET name = '\(name)'")
+
+    // STANDARD, AND RECOMMENDED (statement arguments)
+    try db.execute(sql: "UPDATE player SET name = ?", arguments: [name])
+    ```
+
+For more information, see [Double-quoted String Literals Are Accepted](https://sqlite.org/quirks.html#double_quoted_string_literals_are_accepted), and [Configuration.acceptsDoubleQuotedStringLiterals](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/configuration/acceptsdoublequotedstringliterals).
+
+
+### SQLite error 10 "disk I/O error", SQLite error 23 "not authorized"
+
+Those errors may be the sign that SQLite can't access the database due to [data protection](https://developer.apple.com/documentation/uikit/protecting_the_user_s_privacy/encrypting_your_app_s_files).
+
+When your application should be able to run in the background on a locked device, it has to catch this error, and, for example, wait for [UIApplicationDelegate.applicationProtectedDataDidBecomeAvailable(_:)](https://developer.apple.com/reference/uikit/uiapplicationdelegate/1623044-applicationprotecteddatadidbecom) or [UIApplicationProtectedDataDidBecomeAvailable](https://developer.apple.com/reference/uikit/uiapplicationprotecteddatadidbecomeavailable) notification and retry the failed database operation.
+
+```swift
+do {
+    try ...
+} catch DatabaseError.SQLITE_IOERR, DatabaseError.SQLITE_AUTH {
+    // Handle possible data protection error
+}
+```
+
+This error can also be prevented altogether by using a more relaxed [file protection](https://developer.apple.com/reference/foundation/filemanager/1653059-file_protection_values).
+
+
+### SQLite error 21 "wrong number of statement arguments" with LIKE queries
+
+You may get the error "wrong number of statement arguments" when executing a LIKE query similar to:
+
+```swift
+let name = textField.text ?? ""
+let players = try dbQueue.read { db in
+    try Player.fetchAll(db, sql: "SELECT * FROM player WHERE name LIKE '%?%'", arguments: [name])
+}
+```
+
+The problem lies in the `'%?%'` pattern.
+
+SQLite only interprets `?` as a parameter when it is a placeholder for a whole value (int, double, string, blob, null). In this incorrect query, `?` is just a character in the `'%?%'` string: it is not a query parameter, and is not processed in any way. See [https://www.sqlite.org/lang_expr.html#varparam](https://www.sqlite.org/lang_expr.html#varparam) for more information about SQLite parameters.
+
+To fix the error, you can feed the request with the pattern itself, instead of the name:
+
+```swift
+let name = textField.text ?? ""
+let players: [Player] = try dbQueue.read { db in
+    let pattern = "%\(name)%"
+    return try Player.fetchAll(db, sql: "SELECT * FROM player WHERE name LIKE ?", arguments: [pattern])
+}
+```
+
+
+Sample Code
+===========
+
+- The [Documentation](#documentation) is full of GRDB snippets.
+- [Demo Applications]
+- Open `GRDB.xcworkspace`: it contains GRDB-enabled playgrounds to play with.
+- [groue/SortedDifference](https://github.com/groue/SortedDifference): How to synchronize a database table with a JSON payload + + +--- + +**Thanks** + +- [Pierlis](http://pierlis.com), where we write great software. +- [@alextrob](https://github.com/alextrob), [@alexwlchan](https://github.com/alexwlchan), [@bellebethcooper](https://github.com/bellebethcooper), [@bfad](https://github.com/bfad), [@cfilipov](https://github.com/cfilipov), [@charlesmchen-signal](https://github.com/charlesmchen-signal), [@Chiliec](https://github.com/Chiliec), [@chrisballinger](https://github.com/chrisballinger), [@darrenclark](https://github.com/darrenclark), [@davidkraus](https://github.com/davidkraus), [@eburns-vmware](https://github.com/eburns-vmware), [@felixscheinost](https://github.com/felixscheinost), [@fpillet](https://github.com/fpillet), [@gcox](https://github.com/gcox), [@GetToSet](https://github.com/GetToSet), [@gjeck](https://github.com/gjeck), [@guidedways](https://github.com/guidedways), [@gusrota](https://github.com/gusrota), [@haikusw](https://github.com/haikusw), [@hartbit](https://github.com/hartbit), [@holsety](https://github.com/holsety), [@jroselightricks](https://github.com/jroselightricks), [@kdubb](https://github.com/kdubb), [@kluufger](https://github.com/kluufger), [@KyleLeneau](https://github.com/KyleLeneau), [@layoutSubviews](https://github.com/layoutSubviews), [@mallman](https://github.com/mallman), [@MartinP7r](https://github.com/MartinP7r), [@Marus](https://github.com/Marus), [@mattgallagher](https://github.com/mattgallagher), [@MaxDesiatov](https://github.com/MaxDesiatov), [@michaelkirk-signal](https://github.com/michaelkirk-signal), [@mtancock](https://github.com/mtancock), [@pakko972](https://github.com/pakko972), [@peter-ss](https://github.com/peter-ss), [@pierlo](https://github.com/pierlo), [@pocketpixels](https://github.com/pocketpixels), [@pp5x](https://github.com/pp5x), [@professordeng](https://github.com/professordeng), [@robcas3](https://github.com/robcas3), [@runhum](https://github.com/runhum), [@sberrevoets](https://github.com/sberrevoets), [@schveiguy](https://github.com/schveiguy), [@SD10](https://github.com/SD10), [@sobri909](https://github.com/sobri909), [@sroddy](https://github.com/sroddy), [@steipete](https://github.com/steipete), [@swiftlyfalling](https://github.com/swiftlyfalling), [@Timac](https://github.com/Timac), [@tternes](https://github.com/tternes), [@valexa](https://github.com/valexa), [@wuyuehyang](https://github.com/wuyuehyang), [@ZevEisenberg](https://github.com/ZevEisenberg), and [@zmeyc](https://github.com/zmeyc) for their contributions, help, and feedback on GRDB. +- [@aymerick](https://github.com/aymerick) and [@kali](https://github.com/kali) because SQL. +- [ccgus/fmdb](https://github.com/ccgus/fmdb) for its excellency. + +--- + +[URIs don't change: people change them.](https://www.w3.org/Provider/Style/URI) + +#### Adding support for missing SQL functions or operators + +This chapter was renamed to [Embedding SQL in Query Interface Requests]. + +#### Advanced DatabasePool + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### After Commit Hook + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/afternexttransaction(oncommit:onrollback:)). + +#### Asynchronous APIs + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). 
+ +#### Changes Tracking + +This chapter has been renamed [Record Comparison]. + +#### Concurrency + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### Custom Value Types + +Custom Value Types conform to the [`DatabaseValueConvertible`] protocol. + +#### Customized Decoding of Database Rows + +This chapter has been renamed [Beyond FetchableRecord]. + +#### Customizing the Persistence Methods + +This chapter was replaced with [Persistence Callbacks]. + +#### Database Changes Observation + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseobservation). + +#### Database Configuration + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/configuration). + +#### Database Queues + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasequeue). + +#### Database Pools + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool). + +#### Database Snapshots + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### DatabaseWriter and DatabaseReader Protocols + +This chapter was removed. See the references of [DatabaseReader](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasereader) and [DatabaseWriter](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasewriter). + +#### Date and UUID Coding Strategies + +This chapter has been renamed [Data, Date, and UUID Coding Strategies]. + +#### Dealing with External Connections + +This chapter has been superseded by the [Sharing a Database] guide. + +#### Differences between Database Queues and Pools + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### Enabling FTS5 Support + +FTS5 is enabled by default since GRDB 6.7.0. + +#### FetchedRecordsController + +FetchedRecordsController has been removed in GRDB 5. + +The [Database Observation] chapter describes the other ways to observe the database. + +#### Full-Text Search + +This chapter has [moved](Documentation/FullTextSearch.md). + +#### Guarantees and Rules + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### Joined Queries Support + +This chapter was replaced with the documentation of [splittingRowAdapters(columnCounts:)](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/splittingrowadapters(columncounts:)). + +#### Migrations + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/migrations). + +#### NSNumber and NSDecimalNumber + +This chapter has [moved](#nsnumber-nsdecimalnumber-and-decimal). + +#### Persistable Protocol + +This protocol has been renamed [PersistableRecord] in GRDB 3.0. + +#### PersistenceError + +This error was renamed to [RecordError]. + +#### Prepared Statements + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement). + +#### Row Adapters + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/rowadapter). + +#### RowConvertible Protocol + +This protocol has been renamed [FetchableRecord] in GRDB 3.0. + +#### TableMapping Protocol + +This protocol has been renamed [TableRecord] in GRDB 3.0. 
+ +#### Transactions and Savepoints + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactions). + +#### Transaction Hook + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/database/afternexttransaction(oncommit:onrollback:)). + +#### TransactionObserver Protocol + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactionobserver). + +#### Unsafe Concurrency APIs + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency). + +#### ValueObservation + +This chapter has [moved](https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation). + +#### ValueObservation and DatabaseRegionObservation + +This chapter has been superseded by [ValueObservation] and [DatabaseRegionObservation]. + +[Associations]: Documentation/AssociationsBasics.md +[Beyond FetchableRecord]: #beyond-fetchablerecord +[Identifiable Records]: #identifiable-records +[Codable Records]: #codable-records +[Columns Selected by a Request]: #columns-selected-by-a-request +[common table expression]: Documentation/CommonTableExpressions.md +[Common Table Expressions]: Documentation/CommonTableExpressions.md +[Conflict Resolution]: #conflict-resolution +[Column Names Coding Strategies]: #column-names-coding-strategies +[Data, Date, and UUID Coding Strategies]: #data-date-and-uuid-coding-strategies +[Fetching from Requests]: #fetching-from-requests +[Embedding SQL in Query Interface Requests]: #embedding-sql-in-query-interface-requests +[Full-Text Search]: Documentation/FullTextSearch.md +[Migrations]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/migrations +[The userInfo Dictionary]: #the-userinfo-dictionary +[JSON Columns]: #json-columns +[FetchableRecord]: #fetchablerecord-protocol +[EncodableRecord]: #persistablerecord-protocol +[PersistableRecord]: #persistablerecord-protocol +[Record Comparison]: #record-comparison +[Record Customization Options]: #record-customization-options +[Persistence Callbacks]: #persistence-callbacks +[persistence callbacks]: #persistence-callbacks +[Record Timestamps and Transaction Date]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/recordtimestamps +[TableRecord]: #tablerecord-protocol +[ValueObservation]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/valueobservation +[DatabaseRegionObservation]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseregionobservation +[RxGRDB]: https://github.com/RxSwiftCommunity/RxGRDB +[DatabaseRegion]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseregion +[SQL Interpolation]: Documentation/SQLInterpolation.md +[custom SQLite build]: Documentation/CustomSQLiteBuilds.md +[Combine]: https://developer.apple.com/documentation/combine +[Combine Support]: Documentation/Combine.md +[Concurrency]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/concurrency +[Demo Applications]: Documentation/DemoApps +[Sharing a Database]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasesharing +[FAQ]: #faq +[Database Observation]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databaseobservation +[SQLRequest]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/sqlrequest +[SQL literal]: Documentation/SQLInterpolation.md#sql-literal +[Identifiable]: 
+[Query Interface Organization]: Documentation/QueryInterfaceOrganization.md
+[Database Configuration]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/configuration
+[Persistence Methods]: #persistence-methods
+[persistence methods]: #persistence-methods
+[Persistence Methods and the `RETURNING` clause]: #persistence-methods-and-the-returning-clause
+[RecordError]: #recorderror
+[Transactions and Savepoints]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/transactions
+[`DatabaseQueue`]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasequeue
+[Database queues]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasequeue
+[`DatabasePool`]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool
+[database pools]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasepool
+[`DatabaseValueConvertible`]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/databasevalueconvertible
+[`StatementArguments`]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statementarguments
+[Prepared Statements]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement
+[prepared statements]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement
+[`Statement`]: https://swiftpackageindex.com/groue/grdb.swift/documentation/grdb/statement
+[Database Connections]: #database-connections
+[Database connections]: #database-connections
+[database connection]: #database-connections
diff --git a/prepare_release.sh b/prepare_release.sh
index 15ba8606d9..b4f02f6738 100755
--- a/prepare_release.sh
+++ b/prepare_release.sh
@@ -66,8 +66,8 @@ update_sqlcipher_config() {
 
 update_readme() {
 	current_version="$(git describe --tags --abbrev=0 --exclude=v* origin/SQLCipher)"
-	current_upstream_version="$(grep '\* GRDB' .github/README.md | cut -d '*' -f 3)"
-	current_sqlcipher_version="$(grep '\* SQLCipher' .github/README.md | cut -d '*' -f 3)"
+	current_upstream_version="$(grep '\* GRDB' README.md | cut -d '*' -f 3)"
+	current_sqlcipher_version="$(grep '\* SQLCipher' README.md | cut -d '*' -f 3)"
 
 	grdb_tag="$(git describe --tags --abbrev=0 --match=v* upstream-master)"
 	export new_version upstream_version="${grdb_tag#v}" sqlcipher_version="${sqlcipher_tag#v}"
@@ -80,51 +80,80 @@ update_readme() {
 
 	cat <<- EOF
 
-	DuckDuckGo GRDB.swift current version: ${current_version}
+	Session GRDB.swift current version: ${current_version}
 	Upstream GRDB.swift version: ${current_upstream_version} -> ${upstream_version}
 	SQLCipher version: ${current_sqlcipher_version} -> ${sqlcipher_version}
 
 	EOF
 
-	while ! [[ "${new_version}" =~ [0-9]\.[0-9]\.[0-9] ]]; do
-		read -rp "Input DuckDuckGo GRDB.swift desired version number (x.y.z): " new_version < /dev/tty
+	while ! [[ "${new_version}" =~ [0-9]+\.[0-9]+\.[0-9]+ ]]; do
+		read -rp "Input Session GRDB.swift desired version number (x.y.z): " new_version < /dev/tty
 	done
 
-	envsubst < "${cwd}/.github/README.md.in" > "${cwd}/.github/README.md"
-	git add "${cwd}/.github/README.md"
+	envsubst < "${cwd}/assets/README.md.in" > "${cwd}/README.md"
+	git add "${cwd}/README.md"
 
-	echo "Updated .github/README.md ✅"
+	echo "Updated README.md ✅"
 }
 
 build_and_test_release() {
-	echo "Testing the build ..."
+	local log_file="${cwd}/.build/Logs/GRDB-${grdb_tag}-unittests.log"
+
+	printf '%s' "Building GRDB ... "
" rm -rf "${cwd}/.build" + mkdir -p "${cwd}/.build/Logs" && touch "${log_file}" + + if xcodebuild build-for-testing \ + -project "${cwd}/GRDB.xcodeproj" \ + -scheme "GRDB" \ + -derivedDataPath "${cwd}/.build" >"${log_file}" 2>&1; then + + echo "✅" + else + echo "❌" + echo "Failed to build GRDB with SQLCipher support. See log file at ${log_file} for more info." + exit 1 + fi + + echo "Testing GRDB ... ⚙️" # The skipped test references a test database added with a podfile. # We're safe to disable it since we don't care about SQLCipher 3 compatibility anyway. - swift test --skip "EncryptionTests.testSQLCipher3Compatibility" - - echo "" - swift build -c release - - cat <<- EOF - - SQLCipher ${sqlcipher_tag} is ready to use with GRDB.swift ${grdb_tag} 🎉 - - EOF + if xcodebuild test-without-building \ + -project "${cwd}/GRDB.xcodeproj" \ + -scheme "GRDB" \ + -derivedDataPath "${cwd}/.build" \ + -skip-testing:GRDBTests/EncryptionTests/testSQLCipher3Compatibility \ + | tee -a "$log_file" | $log_formatter 2>&1; then + + echo "Unit tests succeeded ✅" + + cat <<- EOF + + SQLCipher ${sqlcipher_tag} is ready to use with GRDB.swift ${grdb_tag} 🎉 + + EOF + else + cat <<-EOF + Unit tests failed ❌ + See log file at ${log_file} for more info. + Rerun with -f to skip testing. + EOF + exit 1 + fi } setup_new_release_branch() { echo "Setting up new release branch ..." - local release_branch="release/${new_version}" + local release_branch="release/Session-${new_version}" git checkout -b "$release_branch" - git add "${cwd}/.github/README.md" "$sqlcipher_path" - git commit -m "DuckDuckGo GRDB.swift ${new_version} (GRDB ${upstream_version}, SQLCipher ${sqlcipher_version})" + git add "${cwd}/README.md" "$sqlcipher_path" + git commit -m "Session GRDB.swift Session-${new_version} (GRDB ${upstream_version}, SQLCipher ${sqlcipher_version})" cat <<- EOF Release is prepared on branch ${release_branch}. - Push the branch when ready and follow .github/README.md for release instructions. + Push the branch when ready and follow README.md for release instructions. EOF }