diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d36da0c0ed..bc7279af93 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -61,7 +61,7 @@ If your pull request introduces a change that may affect the storage or retrieva
 * Run the tests against the postgres database with `PARSE_SERVER_TEST_DB=postgres PARSE_SERVER_TEST_DATABASE_URI=postgres://postgres:password@localhost:5432/parse_server_postgres_adapter_test_database npm run testonly`. You'll need to have postgres running on your machine and setup [appropriately](https://github.com/parse-community/parse-server/blob/master/.travis.yml#L43) or use [`Docker`](#run-a-parse-postgres-with-docker).
 * The Postgres adapter has a special debugger that traces all the sql commands. You can enable it with setting the environment variable `PARSE_SERVER_LOG_LEVEL=debug`
 * If your feature is intended to only work with MongoDB, you should disable PostgreSQL-specific tests with:
-
+
   - `describe_only_db('mongo')` // will create a `describe` that runs only on mongoDB
   - `it_only_db('mongo')` // will make a test that only runs on mongo
   - `it_exclude_dbs(['postgres'])` // will make a test that runs against all DB's but postgres
@@ -71,22 +71,32 @@ If your pull request introduces a change that may affect the storage or retrieva
   - `it_only_db('postgres')` // will make a test that only runs on postgres
   - `it_exclude_dbs(['mongo'])` // will make a test that runs against all DB's but mongo
 
-#### Run a Parse Postgres with Docker
+#### Run Postgres setup for Parse with Docker
 
-To launch the compatible Postgres instance, copy and paste the following line into your shell:
+The [PostGIS images on Docker Hub (select one with v2.2 or higher)](https://hub.docker.com/r/postgis/postgis) are based on the official [postgres](https://registry.hub.docker.com/_/postgres/) image and will work out-of-the-box, as long as you create a user with the necessary extensions for each of your Parse databases (see below). To launch a compatible Postgres instance, copy and paste the following line into your shell:
 
-```sh
-docker run -d --name parse-postgres -p 5432:5432 -e POSTGRES_USER=$USER --rm mdillon/postgis:11-alpine && sleep 5 && docker exec -it parse-postgres psql -U $USER -c 'create database parse_server_postgres_adapter_test_database;' && docker exec -it parse-postgres psql -U $USER -c 'CREATE EXTENSION postgis;' -d parse_server_postgres_adapter_test_database && docker exec -it parse-postgres psql -U $USER -c 'CREATE EXTENSION postgis_topology;' -d parse_server_postgres_adapter_test_database
+```
+docker run -d --name parse-postgres -p 5432:5432 -e POSTGRES_PASSWORD=password --rm postgis/postgis:11-3.0-alpine && sleep 20 && docker exec -it parse-postgres psql -U postgres -c 'CREATE DATABASE parse_server_postgres_adapter_test_database;' && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION postgis;' -d parse_server_postgres_adapter_test_database && docker exec -it parse-postgres psql -U postgres -c 'CREATE EXTENSION postgis_topology;' -d parse_server_postgres_adapter_test_database
 ```
 
 To stop the Postgres instance:
 
-```sh
+```
 docker stop parse-postgres
 ```
 
+You can also use the [postgis/postgis:11-2.5-alpine](https://hub.docker.com/r/postgis/postgis) image in a Dockerfile and copy this [script](https://github.com/parse-community/parse-server/blob/master/scripts/before_script_postgres.sh) to the image by adding the following lines:
+
+```
+# Install additional scripts. These are run in alphabetical order during initial startup.
+COPY ./scripts/setup-dbs.sh /docker-entrypoint-initdb.d/setup-dbs.sh
+RUN chmod +x /docker-entrypoint-initdb.d/setup-dbs.sh
+```
+
+Note that the script above will ONLY be executed during initialization of the container, and only when the database is empty; see the official [Postgres image](https://hub.docker.com/_/postgres) for details. If you want the script to run again, make sure the container's /var/lib/postgresql/data directory contains no data.
+
 ### Generate Parse Server Config Definition
 
-If you want to make changes to [Parse Server Configuration][config] add the desired configuration to [src/Options/index.js][config-index] and run `npm run definitions`. This will output [src/Options/Definitions.js][config-def] and [src/Options/docs.js][config-docs].
+If you want to make changes to [Parse Server Configuration][config] add the desired configuration to [src/Options/index.js][config-index] and run `npm run definitions`. This will output [src/Options/Definitions.js][config-def] and [src/Options/docs.js][config-docs]. To view the docs, run `npm run docs` and check the `/out` directory.
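
The `setup-dbs.sh` referenced in the Dockerfile snippet above is not part of this diff; a minimal sketch of such an init script, assuming it mirrors the `CREATE DATABASE`/`CREATE EXTENSION` commands from the `docker run` one-liner (the file name and exact contents are assumptions), might look like:

```sh
#!/bin/bash
# Hypothetical setup-dbs.sh, modeled on scripts/before_script_postgres.sh and
# the docker run one-liner above. Scripts in /docker-entrypoint-initdb.d/ run
# once, when the container initializes an empty data directory.
set -e

psql -v ON_ERROR_STOP=1 -U "$POSTGRES_USER" <<-'EOSQL'
  CREATE DATABASE parse_server_postgres_adapter_test_database;
EOSQL

psql -v ON_ERROR_STOP=1 -U "$POSTGRES_USER" -d parse_server_postgres_adapter_test_database <<-'EOSQL'
  CREATE EXTENSION IF NOT EXISTS postgis;
  CREATE EXTENSION IF NOT EXISTS postgis_topology;
EOSQL
```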
diff --git a/README.md b/README.md
index b935b83e03..b5e5d43d26 100644
--- a/README.md
+++ b/README.md
@@ -79,7 +79,7 @@ The fastest and easiest way to get started is to run MongoDB and Parse Server lo
 Before you start make sure you have installed:
 
 - [NodeJS](https://www.npmjs.com/) that includes `npm`
-- [MongoDB](https://www.mongodb.com/) or [PostgreSQL](https://www.postgresql.org/)
+- [MongoDB](https://www.mongodb.com/) or [PostgreSQL](https://www.postgresql.org/) (with [PostGIS](https://postgis.net) 2.2.0 or higher)
 - Optionally [Docker](https://www.docker.com/)
 
 ### Locally
diff --git a/package-lock.json b/package-lock.json
index 09a193d4a5..6f8c2e0d31 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -51,6 +51,7 @@
       "dev": true,
       "requires": {
         "chokidar": "^2.1.8",
+        "commander": "^4.0.1",
         "convert-source-map": "^1.1.0",
         "fs-readdir-recursive": "^1.1.0",
         "glob": "^7.0.0",
@@ -60,6 +61,12 @@
         "source-map": "^0.5.0"
       },
       "dependencies": {
+        "commander": {
+          "version": "4.1.1",
+          "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz",
+          "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==",
+          "dev": true
+        },
         "source-map": {
           "version": "0.5.7",
           "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
@@ -10236,6 +10243,11 @@
       "version": "3.3.3",
       "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.3.tgz",
      "integrity": "sha512-pW0No1RGHgzlpHJO1nsVrHKpOEIxkGg1xB+v0ZmdNH5OAeAwzAVrCnI2/6Mtx+Uys6iaylxa+D3g4j63IKKjSQ=="
+      },
+      "ws": {
+        "version": "7.2.1",
+        "resolved": "https://registry.npmjs.org/ws/-/ws-7.2.1.tgz",
+        "integrity": "sha512-sucePNSafamSKoOqoNfBd8V0StlkzJKL2ZAhGQinCfNQ+oacw+Pk7lcdAElecBF2VkLNZRiIb5Oi1Q5lVUVt2A=="
       }
     }
   },
diff --git a/spec/PostgresStorageAdapter.spec.js b/spec/PostgresStorageAdapter.spec.js
index 0ec500cff1..6c91c6626e 100644
--- a/spec/PostgresStorageAdapter.spec.js
+++ b/spec/PostgresStorageAdapter.spec.js
@@ -86,6 +86,7 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
         expect(columns).toContain('columnA');
         expect(columns).toContain('columnB');
         expect(columns).toContain('columnC');
+        done();
       })
       .catch(error => done.fail(error));
   });
@@ -145,6 +146,206 @@ describe_only_db('postgres')('PostgresStorageAdapter', () => {
       undefined
     );
   });
+
+  it('should use index for caseInsensitive query using Postgres', async () => {
+    const tableName = '_User';
+    const schema = {
+      fields: {
+        objectId: { type: 'String' },
+        username: { type: 'String' },
+        email: { type: 'String' },
+      },
+    };
+    const client = adapter._client;
+    await adapter.createTable(tableName, schema);
+    await client.none(
+      'INSERT INTO $1:name ($2:name, $3:name) VALUES ($4, $5)',
+      [tableName, 'objectId', 'username', 'Bugs', 'Bunny']
+    );
+    // Postgres won't take advantage of the index until it has a lot of records, because a sequential scan is faster for small tables
+    await client.none(
+      'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
+      [tableName, 'objectId', 'username']
+    );
+    const caseInsensitiveData = 'bugs';
+    const originalQuery =
+      'SELECT * FROM $1:name WHERE lower($2:name)=lower($3)';
+    const analyzedExplainQuery = adapter.createExplainableQuery(
+      originalQuery,
+      true
+    );
+    await client
+      .one(analyzedExplainQuery, [tableName, 'objectId', caseInsensitiveData])
+      .then(explained => {
+        const preIndexPlan = explained;
+
+        preIndexPlan['QUERY PLAN'].forEach(element => {
+          // Make sure the search returned only 1 result
+          expect(element.Plan['Actual Rows']).toBe(1);
+          expect(element.Plan['Node Type']).toBe('Seq Scan');
+        });
+        const indexName = 'test_case_insensitive_column';
+
+        adapter
+          .ensureIndex(tableName, schema, ['objectId'], indexName, true)
+          .then(() => {
+            client
+              .one(analyzedExplainQuery, [
+                tableName,
+                'objectId',
+                caseInsensitiveData,
+              ])
+              .then(explained => {
+                const postIndexPlan = explained;
+
+                postIndexPlan['QUERY PLAN'].forEach(element => {
+                  // Make sure the search returned only 1 result
+                  expect(element.Plan['Actual Rows']).toBe(1);
+                  // Should not be a sequential scan
+                  expect(element.Plan['Node Type']).not.toContain('Seq Scan');
+
+                  // Should be using the index created for this test
+                  element.Plan.Plans.forEach(innerElement => {
+                    expect(innerElement['Index Name']).toBe(indexName);
+                  });
+                });
+
+                // These are the same query, so the plans should have the same length
+                for (let i = 0; i < preIndexPlan['QUERY PLAN'].length; i++) {
+                  // The sequential scan should take more time to execute than the indexed one
+                  expect(
+                    preIndexPlan['QUERY PLAN'][i]['Execution Time']
+                  ).toBeGreaterThan(
+                    postIndexPlan['QUERY PLAN'][i]['Execution Time']
+                  );
+                }
+
+                // Test explaining without analyzing
+                const basicExplainQuery = adapter.createExplainableQuery(
+                  originalQuery
+                );
+                client
+                  .one(basicExplainQuery, [
+                    tableName,
+                    'objectId',
+                    caseInsensitiveData,
+                  ])
+                  .then(explained => {
+                    explained['QUERY PLAN'].forEach(element => {
+                      // Check that the basic query plan isn't a sequential scan
+                      expect(element.Plan['Node Type']).not.toContain(
+                        'Seq Scan'
+                      );
+
+                      // Basic (non-analyzed) query plans shouldn't have an execution time
+                      expect(element['Execution Time']).toBeUndefined();
+                    });
+                  });
+              });
+          });
+      })
+      .catch(error => {
+        // Query on a nonexistent table; don't crash
+        if (error.code !== '42P01') {
+          throw error;
+        }
+        return [];
+      });
+  });
+
+  it('should use index for caseInsensitive query', async () => {
+    const tableName = '_User';
+    const user = new Parse.User();
+    user.set('username', 'Bugs');
+    user.set('password', 'Bunny');
+    await user.signUp();
+    const database = Config.get(Parse.applicationId).database;
+
+    // Postgres won't take advantage of the index until it has a lot of records, because a sequential scan is faster for small tables
+    const client = adapter._client;
+    await client.none(
+      'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
+      [tableName, 'objectId', 'username']
+    );
+    const caseInsensitiveData = 'bugs';
+    const fieldToSearch = 'username';
+    // Check using Parse's find method
+    const preIndexPlan = await database.find(
+      tableName,
+      { username: caseInsensitiveData },
+      { caseInsensitive: true, explain: true }
+    );
+
+    preIndexPlan.forEach(element => {
+      element['QUERY PLAN'].forEach(innerElement => {
+        // Before the index exists the plan should be a sequential scan; be careful, as find uses 'any' to query
+        expect(innerElement.Plan['Node Type']).toBe('Seq Scan');
+        // Basic (non-analyzed) query plans shouldn't have an execution time
+        expect(innerElement['Execution Time']).toBeUndefined();
+      });
+    });
+
+    const indexName = 'test_case_insensitive_column';
+    const schema = await new Parse.Schema('_User').get();
+    await adapter.ensureIndex(
+      tableName,
+      schema,
+      [fieldToSearch],
+      indexName,
+      true
+    );
+
+    // Check using Parse's find method
+    const postIndexPlan = await database.find(
+      tableName,
+      { username: caseInsensitiveData },
+      { caseInsensitive: true, explain: true }
+    );
+
+    postIndexPlan.forEach(element => {
+      element['QUERY PLAN'].forEach(innerElement => {
+        // Check that the query plan isn't a sequential scan anymore
+        expect(innerElement.Plan['Node Type']).not.toContain('Seq Scan');
+
+        // Basic (non-analyzed) query plans shouldn't have an execution time
+        expect(innerElement['Execution Time']).toBeUndefined();
+      });
+    });
+  });
+
+  it('should use index for caseInsensitive query using default indexname', async () => {
+    const tableName = '_User';
+    const user = new Parse.User();
+    user.set('username', 'Bugs');
+    user.set('password', 'Bunny');
+    await user.signUp();
+    const database = Config.get(Parse.applicationId).database;
+    const fieldToSearch = 'username';
+    // Create the index before data is inserted
+    const schema = await new Parse.Schema('_User').get();
+    await adapter.ensureIndex(tableName, schema, [fieldToSearch], null, true);
+
+    // Postgres won't take advantage of the index until it has a lot of records, because a sequential scan is faster for small tables
+    const client = adapter._client;
+    await client.none(
+      'INSERT INTO $1:name ($2:name, $3:name) SELECT MD5(random()::text), MD5(random()::text) FROM generate_series(1,5000)',
+      [tableName, 'objectId', 'username']
+    );
+
+    const caseInsensitiveData = 'buGs';
+    // Check using Parse's find method
+    const indexPlan = await database.find(
+      tableName,
+      { username: caseInsensitiveData },
+      { caseInsensitive: true, explain: true }
+    );
+    indexPlan.forEach(element => {
+      element['QUERY PLAN'].forEach(innerElement => {
+        expect(innerElement.Plan['Node Type']).not.toContain('Seq Scan');
+        expect(innerElement.Plan['Index Name']).toContain('parse_default');
+      });
+    });
+  });
 });
 
 describe_only_db('postgres')('PostgresStorageAdapter shutdown', () => {
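
For reference, the spec above asserts on the shape of Postgres's `FORMAT JSON` explain output. A trimmed sketch of what one element of `explained['QUERY PLAN']` might look like once the index is in place (node types and values are illustrative only, and vary by Postgres version and data):

```js
// Illustrative only; not actual output captured from the test database.
const explainedExample = {
  'QUERY PLAN': [
    {
      Plan: {
        'Node Type': 'Bitmap Heap Scan', // no longer 'Seq Scan'
        'Actual Rows': 1, // present only when ANALYZE is used
        Plans: [
          {
            'Node Type': 'Bitmap Index Scan',
            'Index Name': 'test_case_insensitive_column',
          },
        ],
      },
      'Execution Time': 0.42, // present only when ANALYZE is used
    },
  ],
};
```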
diff --git a/src/Adapters/Storage/Postgres/PostgresStorageAdapter.js b/src/Adapters/Storage/Postgres/PostgresStorageAdapter.js
index a3f34dc50b..8c7498d58d 100644
--- a/src/Adapters/Storage/Postgres/PostgresStorageAdapter.js
+++ b/src/Adapters/Storage/Postgres/PostgresStorageAdapter.js
@@ -852,6 +852,15 @@ export class PostgresStorageAdapter implements StorageAdapter {
     this.canSortOnJoinTables = false;
   }
 
+  // Note that analyze = true will actually run the query, executing any INSERTs, DELETEs, etc.
+  createExplainableQuery(query: string, analyze: boolean = false) {
+    if (analyze) {
+      return 'EXPLAIN (ANALYZE, FORMAT JSON) ' + query;
+    } else {
+      return 'EXPLAIN (FORMAT JSON) ' + query;
+    }
+  }
+
   handleShutdown() {
     if (!this._client) {
       return;
@@ -1835,7 +1844,7 @@ export class PostgresStorageAdapter implements StorageAdapter {
     className: string,
     schema: SchemaType,
     query: QueryType,
-    { skip, limit, sort, keys, caseInsensitive }: QueryOptions
+    { skip, limit, sort, keys, caseInsensitive, explain }: QueryOptions
   ) {
     debug('find', className, query, {
       skip,
@@ -1843,6 +1852,7 @@
       sort,
       keys,
       caseInsensitive,
+      explain,
     });
     const hasLimit = limit !== undefined;
     const hasSkip = skip !== undefined;
@@ -1912,7 +1922,8 @@
       values = values.concat(keys);
     }
 
-    const qs = `SELECT ${columns} FROM $1:name ${wherePattern} ${sortPattern} ${limitPattern} ${skipPattern}`;
+    const originalQuery = `SELECT ${columns} FROM $1:name ${wherePattern} ${sortPattern} ${limitPattern} ${skipPattern}`;
+    const qs = explain ? this.createExplainableQuery(originalQuery) : originalQuery;
     debug(qs, values);
     return this._client
       .any(qs, values)
@@ -1923,10 +1934,13 @@
         }
         return [];
       })
-      .then(results =>
-        results.map(object =>
+      .then(results => {
+        if (explain) {
+          return results;
+        }
+        return results.map(object =>
           this.postgresObjectToParseObject(className, object, schema)
-        )
+        );}
       );
   }
 
@@ -2184,8 +2198,14 @@
     );
   }
 
-  async aggregate(className: string, schema: any, pipeline: any) {
-    debug('aggregate', className, pipeline);
+  async aggregate(
+    className: string,
+    schema: any,
+    pipeline: any,
+    readPreference: ?string,
+    hint: ?mixed,
+    explain?: boolean) {
+    debug('aggregate', className, pipeline, readPreference, hint, explain);
     const values = [className];
     let index: number = 2;
     let columns: string[] = [];
@@ -2369,14 +2389,18 @@
         sort !== undefined && sorting.length > 0 ? `ORDER BY ${sorting}` : '';
     }
   }
-
-  const qs = `SELECT ${columns.join()} FROM $1:name ${wherePattern} ${sortPattern} ${limitPattern} ${skipPattern} ${groupPattern}`;
+  const originalQuery = `SELECT ${columns.join()} FROM $1:name ${wherePattern} ${sortPattern} ${limitPattern} ${skipPattern} ${groupPattern}`;
+  const qs = explain ? this.createExplainableQuery(originalQuery) : originalQuery;
   debug(qs, values);
   return this._client
-    .map(qs, values, a =>
-      this.postgresObjectToParseObject(className, a, schema)
-    )
-    .then(results => {
+    .any(qs, values)
+    .then(a => {
+      if (explain) {
+        return a;
+      }
+      const results = a.map(object =>
+        this.postgresObjectToParseObject(className, object, schema)
+      );
       results.forEach(result => {
         if (!Object.prototype.hasOwnProperty.call(result, 'objectId')) {
           result.objectId = null;
         }
@@ -2523,9 +2547,41 @@
     return result;
   }
 
-  // TODO: implement?
-  ensureIndex(): Promise<any> {
-    return Promise.resolve();
-  }
+  async ensureIndex(
+    className: string,
+    schema: SchemaType,
+    fieldNames: string[],
+    indexName: ?string,
+    caseInsensitive: boolean = false,
+    conn: ?any = null
+  ): Promise<any> {
+    conn = conn != null ? conn : this._client;
+    const defaultIndexName = `parse_default_${fieldNames.sort().join('_')}`;
+    const indexNameOptions: Object =
+      indexName != null ? { name: indexName } : { name: defaultIndexName };
+    const constraintPatterns = caseInsensitive
+      ? fieldNames.map((fieldName, index) => `lower($${index + 3}:name) varchar_pattern_ops`)
+      : fieldNames.map((fieldName, index) => `$${index + 3}:name`);
+    const qs = `CREATE INDEX $1:name ON $2:name (${constraintPatterns.join()})`;
+    await conn.none(qs, [indexNameOptions.name, className, ...fieldNames])
+      .catch(error => {
+        if (
+          error.code === PostgresDuplicateRelationError &&
+          error.message.includes(indexNameOptions.name)
+        ) {
+          // Index already exists. Ignore the error.
+        } else if (
+          error.code === PostgresUniqueIndexViolationError &&
+          error.message.includes(indexNameOptions.name)
+        ) {
+          // Cast the error into the proper Parse error
+          throw new Parse.Error(
+            Parse.Error.DUPLICATE_VALUE,
+            'A duplicate value for a field with unique values was provided'
+          );
+        } else {
+          throw error;
+        }
+      });
+  }
 }
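
Taken together, the adapter changes surface to callers roughly as follows; a minimal sketch based on the spec above, assuming a configured `PostgresStorageAdapter` (`adapter`) and its database controller (`database`) obtained as in the tests, and with `username_lower_idx` as a hypothetical index name:

```js
// Create a case-insensitive index on _User.username. Passing null as the
// index name would fall back to the generated `parse_default_<fields>` name,
// as the third test exercises.
const schema = await new Parse.Schema('_User').get();
await adapter.ensureIndex('_User', schema, ['username'], 'username_lower_idx', true);

// With explain: true, find returns the EXPLAIN (FORMAT JSON) plan instead of
// Parse objects; with caseInsensitive: true the WHERE clause compares lower()
// values, so the expression index above can be used.
const plan = await database.find(
  '_User',
  { username: 'bugs' },
  { caseInsensitive: true, explain: true }
);
```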