Edit: solution below.
Having:
-- $ postgrest-with-postgresql-15 psql -- on nix-shell
-- Repro setup: a 3M-row table protected by an RLS policy that compares each
-- row's workspace against a value extracted from the request.jwt.claims GUC.
create table chat_messages(
id text PRIMARY KEY
, content text
, workspace uuid
);
-- Returns the app_metadata object from the JWT claims setting.
-- NOTE(review): the ::jsonb cast of the GUC text is re-evaluated per row
-- under the RLS policy because the cast is not LEAKPROOF (see discussion
-- below) — this is the core of the reported slowdown.
create or replace function app_metadata()
returns jsonb as $$
select ((current_setting('request.jwt.claims', true)::jsonb)->'app_metadata');
$$ language sql stable;
alter table chat_messages enable row level security;
drop policy if exists sel on chat_messages;
-- Rows are visible only when their workspace equals the JWT's
-- app_metadata.workspace claim.
create policy sel on chat_messages for select
using(
workspace = (app_metadata() ->> 'workspace')::uuid
);
grant select on chat_messages to postgrest_test_anonymous;
-- Seed 3,000,000 rows, split evenly (by parity of x) across two workspaces.
insert into chat_messages(id, content, workspace)
select
'id-' || x,
'content-' || x,
case when x % 2 = 0
then 'b080c0f7-8c22-429c-9d1a-ade3e43fb109'::uuid
else '2156a8d9-d06f-40cd-adf8-411ae8c0bc76'::uuid
end
from generate_series(1, 3000000) x;
Doing a count is fast without RLS:
-- Baseline count before switching to the restricted role: the RLS policy is
-- not applied, so the seq scan runs with no per-row filter.
explain analyze SELECT COUNT(*) FROM chat_messages;
Planning Time: 0.105 ms
Execution Time: 161.549 ms
When RLS is enabled, the count is awfully slow:
-- Same count under RLS: switch to the restricted role and install a
-- realistic (lengthy) JWT claims payload for this transaction only.
begin;
set local role to postgrest_test_anonymous;
-- set_config(..., true) makes the setting transaction-local.
select set_config('request.jwt.claims', '{"aal":"aal1","amr":[{"method":"password","timestamp":1670987059}],"app_metadata":{"provider":"email","team":"T01TQ1XH8RW","workspace":"2156a8d9-d06f-40cd-adf8-411ae8c0bc76","workspace_int":195,"workspace_small_text":"195"},"aud":"authenticated","email":"[email protected]","exp":1671591861,"phone":"","role":"authenticated","session_id":"9033fca4-07e0-42ff-9e2f-0b0aa738e3d5","sub":"ef79d643-550e-4e65-a963-ad6f50e67dfe","user_metadata":{}}', true);
SELECT COUNT(*) FROM chat_messages;
count
---------
1500000
explain analyze SELECT COUNT(*) FROM chat_messages;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------
Aggregate (cost=90821.44..90821.45 rows=1 width=8) (actual time=18597.007..18597.008 rows=1 loops=1)
-> Seq Scan on chat_messages (cost=0.00..90795.25 rows=10476 width=0) (actual time=18597.002..18597.002 rows=0 loops=1)
Filter: (workspace = ((((current_setting('request.jwt.claims'::text, true))::jsonb -> 'app_metadata'::text) ->> 'workspace'::text))::uuid)
Rows Removed by Filter: 3000000
Planning Time: 0.783 ms
Execution Time: 18597.062 ms
This happens because:
- The jsonb cast (`::jsonb`) is not LEAKPROOF. This means the cast is evaluated for every row of the query; PostgreSQL does not cache this static value.
- The json value inside `request.jwt.claims` is somewhat lengthy. We have no control over this, as the JWT can come from an external system. Shorter json claims lead to less time:
-- Repeat the RLS'd count with a minimal claims payload: same plan and same
-- per-row cast, but parsing a much shorter json per row reduces execution
-- time (~18.6s above vs ~3.4s below).
begin;
set local role to postgrest_test_anonymous;
select set_config('request.jwt.claims', '{"app_metadata":{"workspace":"2156a8d9-d06f-40cd-adf8-411ae8c0bc76"}}', true);
explain analyze SELECT COUNT(*) FROM chat_messages;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------
Aggregate (cost=117937.00..117937.01 rows=1 width=8) (actual time=3449.217..3449.218 rows=1 loops=1)
-> Seq Scan on chat_messages (cost=0.00..117937.00 rows=1 width=0) (actual time=3449.213..3449.213 rows=0 loops=1)
Filter: (workspace = ((((current_setting('request.jwt.claims'::text, true))::jsonb -> 'app_metadata'::text) ->> 'workspace'::text))::uuid)
Rows Removed by Filter: 3000000
Planning Time: 0.259 ms
Execution Time: 3449.257 ms
The count is a contrived example (for which there are workarounds), but more realistic queries will run into the same problem.
References: