Issue while running Great expectation validations on Big Query External Tables

great-expectations version == 0.18.9
Environment = Running on a Linux VM

Issue: The following expectations pass validation even though, according to the data, they should report some unexpected counts.
expect_column_values_to_be_in_set
expect_column_values_to_be_between
expect_column_value_lengths_to_equal
expect_column_value_lengths_to_be_between
expect_select_column_values_to_be_unique_within_record
expect_column_pair_values_to_be_equal
expect_column_pair_values_a_to_be_greater_than_b
expect_column_values_to_not_be_null
expect_column_values_to_be_null
expect_column_values_to_not_be_in_set
expect_column_values_to_be_unique
expect_compound_columns_to_be_unique
expect_column_values_to_not_match_like_pattern
expect_column_values_to_match_like_pattern
expect_column_pair_values_to_be_in_set
expect_multicolumn_sum_to_equal

This issue occurs only when I run the validation against a BigQuery external table. When I create a normal BigQuery table with the exact same data and execute the same check on it, it works as expected: it returns "False" for a check that should have some unexpected counts according to the data. However, for the same data and the same check, it returns "True" on the BigQuery external table.

Code used:

from datetime import datetime
import pandas as pd
from great_expectations.checkpoint import SimpleCheckpoint
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest

def database_config():
    """Create (or update) a BigQuery SQL datasource and return it.

    Returns:
        tuple: ``(datasource, context, datasource_name)`` where ``datasource``
        is the fluent SQL datasource, ``context`` is the Great Expectations
        data context, and ``datasource_name`` is the registered name.
    """
    import great_expectations as gx
    import os

    # Point the Google client libraries at the service-account key file.
    os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'cred.json'
    project_id = 'my_id'
    bigquery_dataset = 'my_dataset'

    context = gx.get_context()

    # SQLAlchemy-style BigQuery URL: bigquery://<project>/<dataset>
    my_connection_string = f"bigquery://{project_id}/{bigquery_dataset}"
    datasource_name = "my_bigquery_datasource"
    datasource = context.sources.add_or_update_sql(
        name=datasource_name,
        connection_string=my_connection_string,
    )
    return datasource, context, datasource_name

def run(id):
    """Run a single expectation against a BigQuery table via a checkpoint.

    Builds a query asset over the target table, attaches one expectation to a
    suite named after ``id``, and executes a checkpoint validation, printing
    the result.

    Args:
        id: Identifier used for both the run name and the expectation suite
            name (expected to be unique per validation run).
    """
    datasource, context, datasource_name = database_config()

    project_id = ""
    schema_name = ""
    table_name = ""

    run_name = f"{id}"
    expectation_suite_name = f"{id}"

    # In production this string is fetched dynamically from a database table.
    expectation_code = 'expect_column_values_to_be_in_set("Current_Foreclosure_Status", ["1", "3"])'

    # Query asset wrapping the full table; GX validates the query's result set.
    query_filter = f"select * from {project_id}.{schema_name}.{table_name}"
    asset_name = f"{run_name}_my_query_asset"
    datasource.add_query_asset(name=asset_name, query=query_filter)

    data_asset = context.get_datasource(datasource_name).get_asset(asset_name)
    batch_request = data_asset.build_batch_request()

    # Create (or reset) the suite, then attach the expectation via a validator.
    context.add_or_update_expectation_suite(expectation_suite_name)
    validator = context.get_validator(
        batch_request=batch_request,
        expectation_suite_name=expectation_suite_name,
    )

    # SECURITY: eval() executes arbitrary Python. Because expectation_code is
    # fetched from an external database table, a malicious or corrupted row
    # could run arbitrary code here. Whitelist the expectation method name
    # (e.g. check it exists on the validator and starts with "expect_") and
    # parse arguments safely before using this in production.
    eval("validator." + expectation_code)

    # Keep the expectation even if it failed during the interactive check.
    validator.save_expectation_suite(discard_failed_expectations=False)

    checkpoint = context.add_or_update_checkpoint(
        name="my_checkpoint",
        validations=[
            {
                "batch_request": batch_request,
                "expectation_suite_name": expectation_suite_name,
            },
        ],
    )

    checkpoint_result = checkpoint.run()
    print(checkpoint_result)

Note: this is sample code. In my actual pipeline I fetch expectation_code dynamically from a table in my database and execute the validation for each expectation in a for loop.