Experiment with functional testing #575

Closed
wants to merge 2 commits into from
33 changes: 23 additions & 10 deletions .circleci/config.yml
@@ -8,16 +8,20 @@ jobs:
- image: cimg/python:3.9.9
- image: circleci/postgres:9.6.5-alpine-ram

environment:
POSTGRES_TEST_HOST: localhost
POSTGRES_TEST_USER: root
POSTGRES_TEST_PASS: ''
POSTGRES_TEST_PORT: 5432
POSTGRES_TEST_DBNAME: circle_test

steps:
- checkout
- run:
name: "Run Tests - Postgres"
environment:
POSTGRES_TEST_HOST: localhost
POSTGRES_TEST_USER: root
POSTGRES_TEST_PASS: ''
POSTGRES_TEST_PORT: 5432
POSTGRES_TEST_DBNAME: circle_test
name: "Run Functional Tests - Postgres"
command: ./run_functional_test.sh postgres
- run:
name: "Run OG Tests - Postgres"
command: ./run_test.sh postgres
- store_artifacts:
path: ./logs
@@ -28,7 +32,10 @@ jobs:
steps:
- checkout
- run:
name: "Run Tests - Redshift"
name: "Run Functional Tests - Redshift"
command: ./run_functional_test.sh redshift
- run:
name: "Run OG Tests - Redshift"
command: ./run_test.sh redshift
- store_artifacts:
path: ./logs
@@ -39,7 +46,10 @@ jobs:
steps:
- checkout
- run:
name: "Run Tests - Snowflake"
name: "Run Functional Tests - Snowflake"
command: ./run_functional_test.sh snowflake
- run:
name: "Run OG Tests - Snowflake"
command: ./run_test.sh snowflake
- store_artifacts:
path: ./logs
@@ -55,7 +65,10 @@ jobs:
name: "Set up credentials"
command: echo $BIGQUERY_SERVICE_ACCOUNT_JSON > ${HOME}/bigquery-service-key.json
- run:
name: "Run Tests - BigQuery"
name: "Run Functional Tests - BigQuery"
command: ./run_functional_test.sh bigquery
- run:
name: "Run OG Tests - BigQuery"
command: ./run_test.sh bigquery
- store_artifacts:
path: ./logs
3 changes: 3 additions & 0 deletions .gitignore
@@ -4,3 +4,6 @@ dbt_modules/
dbt_packages/
logs/
venv/
env/
test.env
__pycache__
3 changes: 3 additions & 0 deletions dev-requirements.txt
@@ -0,0 +1,3 @@
pytest
pytest-dotenv
dbt-tests-adapter

Contributor Author
Realized this isn't needed! Just installed out of habit

8 changes: 8 additions & 0 deletions pytest.ini
@@ -0,0 +1,8 @@
[pytest]
filterwarnings =
    ignore:.*'soft_unicode' has been renamed to 'soft_str'*:DeprecationWarning
    ignore:unclosed file .*:ResourceWarning
env_files =
    test.env
testpaths =
    tests/functional
13 changes: 13 additions & 0 deletions run_functional_test.sh
@@ -0,0 +1,13 @@
#!/bin/bash
VENV="venv/bin/activate"

# On first run, create the virtualenv and install the adapter plus test dependencies.
if [[ ! -f $VENV ]]; then
    python3 -m venv venv
    . $VENV

    pip install --upgrade pip setuptools
    pip install --pre "dbt-$1" -r dev-requirements.txt
fi

. $VENV
python3 -m pytest tests/functional --profile $1

Contributor Author
Once all the setup is sorted, this is really it:

python3 -m pytest tests/functional --profile postgres
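
For local runs, pytest.ini points pytest-dotenv at env_files = test.env (git-ignored above), so the connection settings can live in a test.env file next to pytest.ini. As an illustrative sketch for Postgres, mirroring the CircleCI environment block, such a file might look like:

# example values only; adjust to match your local Postgres instance
POSTGRES_TEST_HOST=localhost
POSTGRES_TEST_USER=root
POSTGRES_TEST_PASS=
POSTGRES_TEST_PORT=5432
POSTGRES_TEST_DBNAME=circle_test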

Empty file added tests/__init__.py
Empty file.
98 changes: 98 additions & 0 deletions tests/conftest.py
@@ -0,0 +1,98 @@
import pytest
import os

pytest_plugins = ["dbt.tests.fixtures.project"]


def pytest_addoption(parser):
    parser.addoption("--profile", action="store", default="apache_spark", type=str)


# Using @pytest.mark.skip_profile('apache_spark') uses the 'skip_by_profile_type'
# autouse fixture below
def pytest_configure(config):
    config.addinivalue_line(
        "markers",
        "skip_profile(profile): skip test for the given profile",
    )
    config.addinivalue_line(
        "markers",
        "only_profile(profile): only test the given profile",
    )


@pytest.fixture(scope="session")
def dbt_profile_target(request):
    profile_type = request.config.getoption("--profile")
    if profile_type == "postgres":
        target = postgres_target()
    elif profile_type == "redshift":
        target = redshift_target()
    elif profile_type == "snowflake":
        target = snowflake_target()
    elif profile_type == "bigquery":
        target = bigquery_target()
    else:
        raise ValueError(f"Invalid profile type '{profile_type}'")
    return target


def postgres_target():
    return {
        "type": "postgres",
        "host": os.getenv('POSTGRES_TEST_HOST'),
        "user": os.getenv('POSTGRES_TEST_USER'),
        "pass": os.getenv('POSTGRES_TEST_PASS'),
        "port": int(os.getenv('POSTGRES_TEST_PORT')),
        "dbname": os.getenv('POSTGRES_TEST_DBNAME'),
    }


def redshift_target():
    return {
        "type": "redshift",
        "host": os.getenv('REDSHIFT_TEST_HOST'),
        "user": os.getenv('REDSHIFT_TEST_USER'),
        "pass": os.getenv('REDSHIFT_TEST_PASS'),
        "port": int(os.getenv('REDSHIFT_TEST_PORT')),
        "dbname": os.getenv('REDSHIFT_TEST_DBNAME'),
    }


def bigquery_target():
    return {
        "type": "bigquery",
        "method": "service-account",
        "keyfile": os.getenv('BIGQUERY_SERVICE_KEY_PATH'),
        "project": os.getenv('BIGQUERY_TEST_DATABASE'),
    }


def snowflake_target():
    return {
        "type": "snowflake",
        "account": os.getenv('SNOWFLAKE_TEST_ACCOUNT'),
        "user": os.getenv('SNOWFLAKE_TEST_USER'),
        "password": os.getenv('SNOWFLAKE_TEST_PASSWORD'),
        "role": os.getenv('SNOWFLAKE_TEST_ROLE'),
        "database": os.getenv('SNOWFLAKE_TEST_DATABASE'),
        "warehouse": os.getenv('SNOWFLAKE_TEST_WAREHOUSE'),
    }


@pytest.fixture(autouse=True)
def skip_by_profile_type(request):
    profile_type = request.config.getoption("--profile")
    if request.node.get_closest_marker("skip_profile"):
        for skip_profile_type in request.node.get_closest_marker("skip_profile").args:
            if skip_profile_type == profile_type:
                pytest.skip(f"skipped on '{profile_type}' profile")


@pytest.fixture(autouse=True)
def only_profile_type(request):
    profile_type = request.config.getoption("--profile")
    if request.node.get_closest_marker("only_profile"):
        for only_profile_type in request.node.get_closest_marker("only_profile").args:
            if only_profile_type != profile_type:
                pytest.skip(f"skipped on '{profile_type}' profile")
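
As a usage sketch (not part of this diff, and the test names are hypothetical), the markers registered above would be applied like this:

import pytest


@pytest.mark.skip_profile("redshift")  # skipped when run with --profile redshift
def test_not_on_redshift(project):
    ...


@pytest.mark.only_profile("postgres")  # runs only with --profile postgres
def test_postgres_specific(project):
    ...

The project fixture here comes from the dbt.tests.fixtures.project plugin loaded at the top of this conftest.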
30 changes: 30 additions & 0 deletions tests/functional/cross_db_utils/base_cross_db_macro.py
@@ -0,0 +1,30 @@
import os
import pytest
from dbt.tests.util import run_dbt
from tests.functional.cross_db_utils.fixture_cross_db_macro import (
    macros__test_assert_equal_sql,
)


class BaseCrossDbMacro:
    # install this repo as a package!
    @pytest.fixture(scope="class")
    def packages(self):
        return {"packages": [{"local": os.getcwd()}]}

    # setup
    @pytest.fixture(scope="class")
    def macros(self):
        return {"test_assert_equal.sql": macros__test_assert_equal_sql}

    # each child class will reimplement 'models' + 'seeds'
    def seeds(self):
        return {}

    def models(self):
        return {}

    # actual test sequence
    def test_build_assert_equal(self, project):
        run_dbt(['deps'])
        run_dbt(['build'])  # seed, model, test -- all handled by dbt
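
A concrete subclass (illustrative only; the actual test modules are not part of this diff) would override 'seeds' and 'models' as class-scoped fixtures, wiring in fixture strings such as the any_value ones in the next file:

import pytest
from tests.functional.cross_db_utils.base_cross_db_macro import BaseCrossDbMacro
from tests.functional.cross_db_utils.fixture_any_value import (
    seeds__data_any_value_csv,
    seeds__data_any_value_expected_csv,
    models__test_any_value_sql,
    models__test_any_value_yml,
)


# Hypothetical child class: the dict keys become file names in the generated
# test project, and the values are the seed/model/schema contents from the fixture module.
class TestAnyValue(BaseCrossDbMacro):
    @pytest.fixture(scope="class")
    def seeds(self):
        return {
            "data_any_value.csv": seeds__data_any_value_csv,
            "data_any_value_expected.csv": seeds__data_any_value_expected_csv,
        }

    @pytest.fixture(scope="class")
    def models(self):
        return {
            "test_any_value.sql": models__test_any_value_sql,
            "test_any_value.yml": models__test_any_value_yml,
        }

Running ./run_functional_test.sh postgres would then seed, build, and test this generated project end to end.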
61 changes: 61 additions & 0 deletions tests/functional/cross_db_utils/fixture_any_value.py
@@ -0,0 +1,61 @@

# any_value

seeds__data_any_value_csv = """id,key_name,static_col
1,abc,dbt
2,abc,dbt
3,jkl,dbt
4,jkl,dbt
5,jkl,dbt
6,xyz,test
"""


seeds__data_any_value_expected_csv = """key_name,static_col,num_rows
abc,dbt,2
jkl,dbt,3
xyz,test,1
"""


models__test_any_value_sql = """
with data as (

    select * from {{ ref('data_any_value') }}

),

data_output as (

    select * from {{ ref('data_any_value_expected') }}

),

calculate as (
    select
        key_name,
        {{ dbt_utils.any_value('static_col') }} as static_col,
        count(id) as num_rows
    from data
    group by key_name
)

select
    calculate.num_rows as actual,
    data_output.num_rows as expected
from calculate
left join data_output
    on calculate.key_name = data_output.key_name
    and calculate.static_col = data_output.static_col
"""


models__test_any_value_yml = """
version: 2
models:
  - name: test_any_value
    tests:
      - assert_equal:
          actual: actual
          expected: expected
"""
63 changes: 63 additions & 0 deletions tests/functional/cross_db_utils/fixture_bool_or.py
@@ -0,0 +1,63 @@

# bool_or

seeds__data_bool_or_csv = """key,val1,val2
abc,1,1
abc,1,0
def,1,0
hij,1,1
hij,1,
klm,1,0
klm,1,
"""


seeds__data_bool_or_expected_csv = """key,value
abc,true
def,false
hij,true
klm,false
"""


models__test_bool_or_sql = """
with data as (

    select * from {{ ref('data_bool_or') }}

),

data_output as (

    select * from {{ ref('data_bool_or_expected') }}

),

calculate as (

    select
        key,
        {{ dbt_utils.bool_or('val1 = val2') }} as value
    from data
    group by key

)

select
    calculate.value as actual,
    data_output.value as expected
from calculate
left join data_output
    on calculate.key = data_output.key
"""


models__test_bool_or_yml = """
version: 2
models:
  - name: test_bool_or
    tests:
      - assert_equal:
          actual: actual
          expected: expected
"""
30 changes: 30 additions & 0 deletions tests/functional/cross_db_utils/fixture_cast_bool_to_text.py
@@ -0,0 +1,30 @@

# cast_bool_to_text

models__test_cast_bool_to_text_sql = """
with data as (

    select 0=1 as input, 'false' as expected union all
    select 1=1 as input, 'true' as expected union all
    select null as input, null as expected

)

select

    {{ dbt_utils.cast_bool_to_text("input") }} as actual,
    expected

from data
"""


models__test_cast_bool_to_text_yml = """
version: 2
models:
  - name: test_cast_bool_to_text
    tests:
      - assert_equal:
          actual: actual
          expected: expected
"""