From 1827e486069f24742387cdb00422c164311192f2 Mon Sep 17 00:00:00 2001
From: Jannis Leidel
Date: Fri, 18 Oct 2019 09:40:45 +0200
Subject: [PATCH] Revert some changes 2to3 tends to do (#4261)

- Revert some changes 2to3 tends to do when it errs on the side of caution
  regarding dict view objects (a short sketch of the dict-view behavior in
  question follows the diff).
- Also fix some naming issues with one-character variables in list
  comprehensions.
- Fix a Flask warning by passing `static_url_path` instead of `static_path`.
---
 bin/release_manager.py                         |  1 +
 redash/app.py                                  |  2 +-
 redash/cli/rq.py                               |  2 +-
 redash/handlers/base.py                        |  2 +-
 redash/handlers/data_sources.py                |  2 +-
 redash/handlers/destinations.py                |  2 +-
 redash/handlers/queries.py                     |  2 +-
 redash/models/__init__.py                      |  6 +++---
 redash/models/parameterized_query.py           | 10 +++++-----
 redash/monitor.py                              |  2 +-
 redash/query_runner/__init__.py                |  2 +-
 redash/query_runner/athena.py                  |  2 +-
 redash/query_runner/axibase_tsd.py             |  2 +-
 redash/query_runner/azure_kusto.py             |  2 +-
 redash/query_runner/cass.py                    |  2 +-
 redash/query_runner/db2.py                     |  2 +-
 redash/query_runner/dgraph.py                  |  4 ++--
 redash/query_runner/drill.py                   |  2 +-
 redash/query_runner/druid.py                   |  2 +-
 redash/query_runner/elasticsearch.py           |  6 +++---
 redash/query_runner/google_analytics.py        |  2 +-
 redash/query_runner/google_spreadsheets.py     |  4 ++--
 redash/query_runner/hive_ds.py                 |  2 +-
 redash/query_runner/impala_ds.py               |  2 +-
 redash/query_runner/influx_db.py               |  2 +-
 redash/query_runner/jql.py                     |  2 +-
 redash/query_runner/kylin.py                   |  2 +-
 redash/query_runner/mapd.py                    |  2 +-
 redash/query_runner/memsql_ds.py               |  4 ++--
 redash/query_runner/mssql.py                   |  2 +-
 redash/query_runner/mssql_odbc.py              |  2 +-
 redash/query_runner/mysql.py                   | 10 +++++++---
 redash/query_runner/oracle.py                  |  2 +-
 redash/query_runner/pg.py                      |  2 +-
 redash/query_runner/phoenix.py                 |  2 +-
 redash/query_runner/presto.py                  |  2 +-
 redash/query_runner/prometheus.py              |  8 ++++----
 redash/query_runner/qubole.py                  |  2 +-
 redash/query_runner/query_results.py           |  2 +-
 redash/query_runner/rockset.py                 |  2 +-
 redash/query_runner/salesforce.py              |  2 +-
 redash/query_runner/snowflake.py               |  2 +-
 redash/query_runner/sqlite.py                  |  2 +-
 redash/query_runner/treasuredata.py            |  2 +-
 redash/query_runner/yandex_metrica.py          |  4 ++--
 redash/serializers/query_result.py             | 18 +++++++++---------
 redash/worker.py                               |  2 +-
 tests/extensions/test_extensions.py            | 12 ++++++------
 tests/factories.py                             |  2 +-
 tests/handlers/test_dashboards.py              |  6 +++---
 tests/handlers/test_data_sources.py            |  4 ++--
 tests/handlers/test_destinations.py            |  6 +++---
 tests/handlers/test_queries.py                 | 10 +++++-----
 tests/handlers/test_users.py                   |  2 +-
 tests/models/test_queries.py                   |  2 +-
 tests/query_runner/test_google_spreadsheets.py |  2 +-
 56 files changed, 99 insertions(+), 94 deletions(-)

diff --git a/bin/release_manager.py b/bin/release_manager.py
index 4fda3a94bf..3d9b21c895 100644
--- a/bin/release_manager.py
+++ b/bin/release_manager.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import os
 import sys
 import re
diff --git a/redash/app.py b/redash/app.py
index e4e1335561..dcb8e6d9a9 100644
--- a/redash/app.py
+++ b/redash/app.py
@@ -10,7 +10,7 @@ def __init__(self, *args, **kwargs):
         kwargs.update({
             'template_folder': settings.STATIC_ASSETS_PATH,
             'static_folder': settings.STATIC_ASSETS_PATH,
-            'static_path': '/static',
+            'static_url_path': '/static',
         })
         super(Redash, self).__init__(__name__, *args, **kwargs)
         # Make sure we get the right referral address even behind proxies like nginx.
diff --git a/redash/cli/rq.py b/redash/cli/rq.py
index 9f6fcc5a0d..357f7cfd2d 100644
--- a/redash/cli/rq.py
+++ b/redash/cli/rq.py
@@ -23,7 +23,7 @@ def scheduler():
 @argument('queues', nargs=-1)
 def worker(queues='default'):
     if not queues:
-        queues = ('default', )
+        queues = ('default',)
     with Connection(rq_redis_connection):
         w = Worker(queues)
         w.work()
diff --git a/redash/handlers/base.py b/redash/handlers/base.py
index 763c65cadf..e4bc8e66b2 100644
--- a/redash/handlers/base.py
+++ b/redash/handlers/base.py
@@ -43,7 +43,7 @@ def record_event(self, options):
 
     # TODO: this should probably be somewhere else
     def update_model(self, model, updates):
-        for k, v in list(updates.items()):
+        for k, v in updates.items():
             setattr(model, k, v)
 
diff --git a/redash/handlers/data_sources.py b/redash/handlers/data_sources.py
index a1617b741b..7ab2ffe43c 100644
--- a/redash/handlers/data_sources.py
+++ b/redash/handlers/data_sources.py
@@ -19,7 +19,7 @@ class DataSourceTypeListResource(BaseResource):
     @require_admin
     def get(self):
-        available_query_runners = [q for q in list(query_runners.values()) if not q.deprecated]
+        available_query_runners = [q for q in query_runners.values() if not q.deprecated]
         return [q.to_dict() for q in sorted(available_query_runners, key=lambda q: q.name())]
 
diff --git a/redash/handlers/destinations.py b/redash/handlers/destinations.py
index eaac7bd4a4..261da2bb39 100644
--- a/redash/handlers/destinations.py
+++ b/redash/handlers/destinations.py
@@ -13,7 +13,7 @@ class DestinationTypeListResource(BaseResource):
     @require_admin
     def get(self):
-        available_destinations = [q for q in list(destinations.values()) if not q.deprecated]
+        available_destinations = [q for q in destinations.values() if not q.deprecated]
         return [q.to_dict() for q in available_destinations]
 
diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
index ba099d5a8d..1e53633aaa 100644
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -336,7 +336,7 @@ def post(self, query_id):
         query_def['query_text'] = query_def.pop('query')
 
         if 'tags' in query_def:
-            query_def['tags'] = [_f for _f in query_def['tags'] if _f]
+            query_def['tags'] = [tag for tag in query_def['tags'] if tag]
 
         query_def['last_modified_by'] = self.current_user
         query_def['changed_by'] = self.current_user
diff --git a/redash/models/__init__.py b/redash/models/__init__.py
index 7c703a5c17..60aaecdb9a 100644
--- a/redash/models/__init__.py
+++ b/redash/models/__init__.py
@@ -218,7 +218,7 @@ def groups(self):
         groups = DataSourceGroup.query.filter(
             DataSourceGroup.data_source == self
         )
-        return dict([(g.group_id, g.view_only) for g in groups])
+        return dict([(group.group_id, group.view_only) for group in groups])
 
 
 @generic_repr('id', 'data_source_id', 'group_id', 'view_only')
@@ -543,8 +543,8 @@ def past_scheduled_queries(cls):
             .filter(Query.schedule.isnot(None))
             .order_by(Query.id)
         )
-        return [x for x in queries if x.schedule["until"] is not None and pytz.utc.localize(
-            datetime.datetime.strptime(x.schedule['until'], '%Y-%m-%d')
+        return [query for query in queries if query.schedule["until"] is not None and pytz.utc.localize(
+            datetime.datetime.strptime(query.schedule['until'], '%Y-%m-%d')
         ) <= now]
 
     @classmethod
diff --git a/redash/models/parameterized_query.py b/redash/models/parameterized_query.py
index 3ed593ec99..81ddde18c6 100644
--- a/redash/models/parameterized_query.py
+++ b/redash/models/parameterized_query.py
@@ -10,9 +10,9 @@ def _pluck_name_and_value(default_column, row):
-    row = {k.lower(): v for k, v in list(row.items())}
-    name_column = "name" if "name" in list(row.keys()) else default_column.lower()
-    value_column = "value" if "value" in list(row.keys()) else default_column.lower()
+    row = {k.lower(): v for k, v in row.items()}
+    name_column = "name" if "name" in row.keys() else default_column.lower()
+    value_column = "value" if "value" in row.keys() else default_column.lower()
     return {"name": row[name_column], "value": text_type(row[value_column])}
@@ -73,7 +73,7 @@ def _parameter_names(parameter_values):
     names = []
     for key, value in parameter_values.items():
         if isinstance(value, dict):
-            for inner_key in list(value.keys()):
+            for inner_key in value.keys():
                 names.append('{}.{}'.format(key, inner_key))
         else:
             names.append(key)
@@ -170,7 +170,7 @@ def _valid(self, name, value):
 
     @property
     def is_safe(self):
-        text_parameters = [p for p in self.schema if p["type"] == "text"]
+        text_parameters = [param for param in self.schema if param["type"] == "text"]
         return not any(text_parameters)
 
     @property
diff --git a/redash/monitor.py b/redash/monitor.py
index 23ac287f90..f0d8abfa1b 100644
--- a/redash/monitor.py
+++ b/redash/monitor.py
@@ -103,7 +103,7 @@ def get_waiting_in_queue(queue_name):
 
 def parse_tasks(task_lists, state):
     rows = []
-    for task in itertools.chain(*list(task_lists.values())):
+    for task in itertools.chain(*task_lists.values()):
         task_row = {
             'state': state,
             'task_name': task['name'],
diff --git a/redash/query_runner/__init__.py b/redash/query_runner/__init__.py
index e1b390ae35..a3d08145d0 100644
--- a/redash/query_runner/__init__.py
+++ b/redash/query_runner/__init__.py
@@ -148,7 +148,7 @@ def _get_tables(self, schema_dict):
         return []
 
     def _get_tables_stats(self, tables_dict):
-        for t in list(tables_dict.keys()):
+        for t in tables_dict.keys():
             if type(tables_dict[t]) == dict:
                 res = self._run_query_internal('select count(*) as cnt from %s' % t)
                 tables_dict[t]['size'] = res[0]['cnt']
diff --git a/redash/query_runner/athena.py b/redash/query_runner/athena.py
index 6781aced73..52f32d3f33 100644
--- a/redash/query_runner/athena.py
+++ b/redash/query_runner/athena.py
@@ -223,7 +223,7 @@ def run_query(self, query, user):
             cursor.execute(query)
             column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]
             columns = self.fetch_columns(column_tuples)
-            rows = [dict(list(zip(([c['name'] for c in columns]), r))) for i, r in enumerate(cursor.fetchall())]
+            rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
             qbytes = None
             athena_query_id = None
             try:
diff --git a/redash/query_runner/axibase_tsd.py b/redash/query_runner/axibase_tsd.py
index 6e40af07ee..24aa5f3321 100644
--- a/redash/query_runner/axibase_tsd.py
+++ b/redash/query_runner/axibase_tsd.py
@@ -69,7 +69,7 @@ def generate_rows_and_columns(csv_response):
                 'name': i['name']} for i in meta_columns]
     column_names = [c['name'] for c in columns]
-    rows = [dict(list(zip(column_names, row))) for row in reader]
+    rows = [dict(zip(column_names, row)) for row in reader]
 
     return columns, rows
diff --git a/redash/query_runner/azure_kusto.py b/redash/query_runner/azure_kusto.py
index 93f33fd86d..fa8c4a85f3 100644
--- a/redash/query_runner/azure_kusto.py
+++ b/redash/query_runner/azure_kusto.py
@@ -137,7 +137,7 @@ def get_schema(self, get_stats=False):
         results = json_loads(results)
 
         schema_as_json = json_loads(results['rows'][0]['DatabaseSchema'])
-        tables_list = list(schema_as_json['Databases'][self.configuration['database']]['Tables'].values())
+        tables_list = schema_as_json['Databases'][self.configuration['database']]['Tables'].values()
 
         schema = {}
diff --git a/redash/query_runner/cass.py b/redash/query_runner/cass.py
index 9fadff0e4a..e56aad703a 100644
--- a/redash/query_runner/cass.py
+++ b/redash/query_runner/cass.py
@@ -128,7 +128,7 @@ def run_query(self, query, user):
             columns = self.fetch_columns([(c, 'string') for c in column_names])
 
-            rows = [dict(list(zip(column_names, row))) for row in result]
+            rows = [dict(zip(column_names, row)) for row in result]
 
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data, cls=CassandraJSONEncoder)
diff --git a/redash/query_runner/db2.py b/redash/query_runner/db2.py
index c63f84fada..8f2257a1e3 100644
--- a/redash/query_runner/db2.py
+++ b/redash/query_runner/db2.py
@@ -123,7 +123,7 @@ def run_query(self, query, user):
             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
 
                 data = {'columns': columns, 'rows': rows}
                 error = None
diff --git a/redash/query_runner/dgraph.py b/redash/query_runner/dgraph.py
index ce771df491..3bf68c82d2 100644
--- a/redash/query_runner/dgraph.py
+++ b/redash/query_runner/dgraph.py
@@ -19,7 +19,7 @@ def reduce_item(reduced_item, key, value):
 
     # Reduction Condition 2
     elif type(value) is dict:
-        sub_keys = list(value.keys())
+        sub_keys = value.keys()
         for sub_key in sub_keys:
             reduce_item(reduced_item, '{}.{}'.format(key, sub_key), value[sub_key])
@@ -105,7 +105,7 @@ def run_query(self, query, user):
                 reduced_item = {}
                 reduce_item(reduced_item, first_key, item)
 
-                header += list(reduced_item.keys())
+                header += reduced_item.keys()
 
                 processed_data.append(reduced_item)
diff --git a/redash/query_runner/drill.py b/redash/query_runner/drill.py
index 869560fc2d..4c19fdfbc4 100644
--- a/redash/query_runner/drill.py
+++ b/redash/query_runner/drill.py
@@ -120,7 +120,7 @@ def get_schema(self, get_stats=False):
         """
         allowed_schemas = self.configuration.get('allowed_schemas')
         if allowed_schemas:
-            query += "and TABLE_SCHEMA in ({})".format(', '.join(["'{}'".format(re.sub('[^a-zA-Z0-9_.`]', '', x)) for x in allowed_schemas.split(',')]))
+            query += "and TABLE_SCHEMA in ({})".format(', '.join(["'{}'".format(re.sub('[^a-zA-Z0-9_.`]', '', allowed_schema)) for allowed_schema in allowed_schemas.split(',')]))
 
         results, error = self.run_query(query, None)
diff --git a/redash/query_runner/druid.py b/redash/query_runner/druid.py
index a8530ccd5d..17e55f4a67 100644
--- a/redash/query_runner/druid.py
+++ b/redash/query_runner/druid.py
@@ -55,7 +55,7 @@ def run_query(self, query, user):
         try:
             cursor.execute(query)
             columns = self.fetch_columns([(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description])
-            rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+            rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
 
             data = {'columns': columns, 'rows': rows}
             error = None
diff --git a/redash/query_runner/elasticsearch.py b/redash/query_runner/elasticsearch.py
index abf1495186..a5f08c8e28 100644
--- a/redash/query_runner/elasticsearch.py
+++ b/redash/query_runner/elasticsearch.py
@@ -151,7 +151,7 @@ def parse_doc(doc, path=None):
             '''
             path = path or []
            result = []
-            for field, description in list(doc['properties'].items()):
+            for field, description in doc['properties'].items():
                 if 'properties' in description:
                     result.extend(parse_doc(description, path + [field]))
                 else:
@@ -166,10 +166,10 @@ def parse_doc(doc, path=None):
         # make a schema for each index
         # the index contains a mappings dict with documents
         # in a hierarchical format
-        for name, index in list(mappings.items()):
+        for name, index in mappings.items():
             columns = []
             schema[name] = {'name': name}
-            for doc, items in list(index['mappings'].items()):
+            for doc, items in index['mappings'].items():
                 columns.extend(parse_doc(items))
 
             # remove duplicates
diff --git a/redash/query_runner/google_analytics.py b/redash/query_runner/google_analytics.py
index 2fbb77b485..ed519e6c1c 100644
--- a/redash/query_runner/google_analytics.py
+++ b/redash/query_runner/google_analytics.py
@@ -145,7 +145,7 @@ def run_query(self, query, user):
             params = json_loads(query)
         except:
             params = parse_qs(urlparse(query).query, keep_blank_values=True)
-            for key in list(params.keys()):
+            for key in params.keys():
                 params[key] = ','.join(params[key])
                 if '-' in key:
                     params[key.replace('-', '_')] = params.pop(key)
diff --git a/redash/query_runner/google_spreadsheets.py b/redash/query_runner/google_spreadsheets.py
index a04da1ddf9..7584153665 100644
--- a/redash/query_runner/google_spreadsheets.py
+++ b/redash/query_runner/google_spreadsheets.py
@@ -50,7 +50,7 @@ def _get_columns_and_column_names(row):
 
 def _value_eval_list(row_values, col_types):
     value_list = []
-    raw_values = list(zip(col_types, row_values))
+    raw_values = zip(col_types, row_values)
     for typ, rval in raw_values:
         try:
             if rval is None or rval == '':
@@ -100,7 +100,7 @@ def parse_worksheet(worksheet):
                 columns[j]['type'] = guess_type(value)
 
     column_types = [c['type'] for c in columns]
-    rows = [dict(list(zip(column_names, _value_eval_list(row, column_types)))) for row in worksheet[HEADER_INDEX + 1:]]
+    rows = [dict(zip(column_names, _value_eval_list(row, column_types))) for row in worksheet[HEADER_INDEX + 1:]]
 
     data = {'columns': columns, 'rows': rows}
     return data
diff --git a/redash/query_runner/hive_ds.py b/redash/query_runner/hive_ds.py
index 33428b98e4..59555f6589 100644
--- a/redash/query_runner/hive_ds.py
+++ b/redash/query_runner/hive_ds.py
@@ -120,7 +120,7 @@ def run_query(self, query, user):
                     'type': types_map.get(column[COLUMN_TYPE], None)
                 })
 
-            rows = [dict(list(zip(column_names, row))) for row in cursor]
+            rows = [dict(zip(column_names, row)) for row in cursor]
 
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
diff --git a/redash/query_runner/impala_ds.py b/redash/query_runner/impala_ds.py
index be2bea30c4..7f64164f73 100644
--- a/redash/query_runner/impala_ds.py
+++ b/redash/query_runner/impala_ds.py
@@ -117,7 +117,7 @@ def run_query(self, query, user):
                     'type': types_map.get(column[COLUMN_TYPE], None)
                 })
 
-            rows = [dict(list(zip(column_names, row))) for row in cursor]
+            rows = [dict(zip(column_names, row)) for row in cursor]
 
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
diff --git a/redash/query_runner/influx_db.py b/redash/query_runner/influx_db.py
index 2a53c12343..bec53c8d27 100644
--- a/redash/query_runner/influx_db.py
+++ b/redash/query_runner/influx_db.py
@@ -23,7 +23,7 @@ def _transform_result(results):
             if column not in result_columns:
                 result_columns.append(column)
         tags = series.get('tags', {})
-        for key in list(tags.keys()):
+        for key in tags.keys():
             if key not in result_columns:
                 result_columns.append(key)
diff --git a/redash/query_runner/jql.py b/redash/query_runner/jql.py
index 1550bbe16c..7b10387353 100644
--- a/redash/query_runner/jql.py
+++ b/redash/query_runner/jql.py
@@ -12,7 +12,7 @@ def __init__(self):
         self.rows = []
 
     def add_row(self, row):
-        for key in list(row.keys()):
+        for key in row.keys():
             self.add_column(key)
 
         self.rows.append(row)
diff --git a/redash/query_runner/kylin.py b/redash/query_runner/kylin.py
index 01b0ef02c6..c08954d3a1 100644
--- a/redash/query_runner/kylin.py
+++ b/redash/query_runner/kylin.py
@@ -132,7 +132,7 @@ def get_columns(self, colmetas):
 
     def get_rows(self, columns, results):
         return [
-            dict(list(zip((c['name'] for c in columns), row)))
+            dict(zip((column['name'] for column in columns), row))
             for row in results
         ]
diff --git a/redash/query_runner/mapd.py b/redash/query_runner/mapd.py
index 652b9d4dc6..d4e6eaef0d 100644
--- a/redash/query_runner/mapd.py
+++ b/redash/query_runner/mapd.py
@@ -82,7 +82,7 @@ def run_query(self, query, user):
         try:
             cursor.execute(query)
             columns = self.fetch_columns([(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description])
-            rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+            rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
             data = {'columns': columns, 'rows': rows}
             error = None
             json_data = json_dumps(data)
diff --git a/redash/query_runner/memsql_ds.py b/redash/query_runner/memsql_ds.py
index dadd2844c7..4525920e03 100644
--- a/redash/query_runner/memsql_ds.py
+++ b/redash/query_runner/memsql_ds.py
@@ -107,13 +107,13 @@ def run_query(self, query, user):
            #         'type': types_map.get(column[COLUMN_TYPE], None)
            #     })
 
-            rows = [dict(list(zip(list(row.keys()), list(row.values())))) for row in res]
+            rows = [dict(zip(row.keys(), row.values())) for row in res]
 
            # ====================================================================================================
            # temporary - until https://github.com/memsql/memsql-python/pull/8 gets merged
            # ====================================================================================================
             columns = []
-            column_names = list(rows[0].keys()) if rows else None
+            column_names = rows[0].keys() if rows else None
 
             if column_names:
                 for column in column_names:
diff --git a/redash/query_runner/mssql.py b/redash/query_runner/mssql.py
index d59bf6e6ae..541c736747 100644
--- a/redash/query_runner/mssql.py
+++ b/redash/query_runner/mssql.py
@@ -137,7 +137,7 @@ def run_query(self, query, user):
             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in data]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in data]
 
                 data = {'columns': columns, 'rows': rows}
                 json_data = json_dumps(data)
diff --git a/redash/query_runner/mssql_odbc.py b/redash/query_runner/mssql_odbc.py
index a453d45a90..48c18cb930 100644
--- a/redash/query_runner/mssql_odbc.py
+++ b/redash/query_runner/mssql_odbc.py
@@ -126,7 +126,7 @@ def run_query(self, query, user):
             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in data]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in data]
 
                 data = {'columns': columns, 'rows': rows}
                 json_data = json_dumps(data)
diff --git a/redash/query_runner/mysql.py b/redash/query_runner/mysql.py
index 3e7abaa18a..a6f5d5c480 100644
--- a/redash/query_runner/mysql.py
+++ b/redash/query_runner/mysql.py
@@ -197,7 +197,7 @@ def _run_query(self, query, user, connection, r, ev):
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in desc])
                 rows = [
-                    dict(list(zip((c['name'] for c in columns), row)))
+                    dict(zip((column['name'] for column in columns), row))
                     for row in data
                 ]
@@ -226,8 +226,12 @@ def _get_ssl_parameters(self):
         ssl_params = {}
 
         if self.configuration.get('use_ssl'):
-            config_map = dict(ssl_cacert='ca', ssl_cert='cert', ssl_key='key')
-            for key, cfg in list(config_map.items()):
+            config_map = {
+                "ssl_cacert": "ca",
+                "ssl_cert": "cert",
+                "ssl_key": "key",
+            }
+            for key, cfg in config_map.items():
                 val = self.configuration.get(key)
                 if val:
                     ssl_params[cfg] = val
diff --git a/redash/query_runner/oracle.py b/redash/query_runner/oracle.py
index ca4ac2bc97..11a382c35b 100644
--- a/redash/query_runner/oracle.py
+++ b/redash/query_runner/oracle.py
@@ -143,7 +143,7 @@ def run_query(self, query, user):
                 rows_count = cursor.rowcount
                 if cursor.description is not None:
                     columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])
-                    rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+                    rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
                     data = {'columns': columns, 'rows': rows}
                     error = None
                     json_data = json_dumps(data)
diff --git a/redash/query_runner/pg.py b/redash/query_runner/pg.py
index 7e78ce3a45..99787730bb 100644
--- a/redash/query_runner/pg.py
+++ b/redash/query_runner/pg.py
@@ -214,7 +214,7 @@ def run_query(self, query, user):
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
                 rows = [
-                    dict(list(zip((c['name'] for c in columns), row)))
+                    dict(zip((column['name'] for column in columns), row))
                     for row in cursor
                 ]
diff --git a/redash/query_runner/phoenix.py b/redash/query_runner/phoenix.py
index 2c8d4883f8..22fc7c996c 100644
--- a/redash/query_runner/phoenix.py
+++ b/redash/query_runner/phoenix.py
@@ -99,7 +99,7 @@ def run_query(self, query, user):
             cursor.execute(query)
             column_tuples = [(i[0], TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
             columns = self.fetch_columns(column_tuples)
-            rows = [dict(list(zip(([c['name'] for c in columns]), r))) for i, r in enumerate(cursor.fetchall())]
+            rows = [dict(zip(([column['name'] for column in columns]), r)) for i, r in enumerate(cursor.fetchall())]
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
             error = None
diff --git a/redash/query_runner/presto.py b/redash/query_runner/presto.py
index 5bc4d0b03e..56369903ef 100644
--- a/redash/query_runner/presto.py
+++ b/redash/query_runner/presto.py
@@ -114,7 +114,7 @@ def run_query(self, query, user):
             column_tuples = [(i[0], PRESTO_TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
             columns = self.fetch_columns(column_tuples)
-            rows = [dict(list(zip(([c['name'] for c in columns]), r)))
+            rows = [dict(zip(([column['name'] for column in columns]), r))
                     for i, r in enumerate(cursor.fetchall())]
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
diff --git a/redash/query_runner/prometheus.py b/redash/query_runner/prometheus.py
index 05a7343c8a..180fa7bc7d 100644
--- a/redash/query_runner/prometheus.py
+++ b/redash/query_runner/prometheus.py
@@ -44,7 +44,7 @@ def convert_query_range(payload):
     query_range = {}
 
     for key in ['start', 'end']:
-        if key not in list(payload.keys()):
+        if key not in payload.keys():
             continue
         value = payload[key][0]
@@ -134,10 +134,10 @@ def run_query(self, query, user):
             query = 'query={}'.format(query) if not query.startswith('query=') else query
 
             payload = parse_qs(query)
-            query_type = 'query_range' if 'step' in list(payload.keys()) else 'query'
+            query_type = 'query_range' if 'step' in payload.keys() else 'query'
 
             # for the range of until now
-            if query_type == 'query_range' and ('end' not in list(payload.keys()) or 'now' in payload['end']):
+            if query_type == 'query_range' and ('end' not in payload.keys() or 'now' in payload['end']):
                 date_now = datetime.now()
                 payload.update({'end': [date_now]})
@@ -153,7 +153,7 @@ def run_query(self, query, user):
             if len(metrics) == 0:
                 return None, 'query result is empty.'
 
-            metric_labels = list(metrics[0]['metric'].keys())
+            metric_labels = metrics[0]['metric'].keys()
 
             for label_name in metric_labels:
                 columns.append({
diff --git a/redash/query_runner/qubole.py b/redash/query_runner/qubole.py
index e14950aedd..c5ea676d6f 100644
--- a/redash/query_runner/qubole.py
+++ b/redash/query_runner/qubole.py
@@ -106,7 +106,7 @@ def run_query(self, query, user):
                 data = results.split('\r\n')
                 columns = self.fetch_columns([(i, TYPE_STRING) for i in data.pop(0).split('\t')])
-                rows = [dict(list(zip((c['name'] for c in columns), row.split('\t')))) for row in data]
+                rows = [dict(zip((column['name'] for column in columns), row.split('\t'))) for row in data]
 
                 json_data = json_dumps({'columns': columns, 'rows': rows})
         except KeyboardInterrupt:
diff --git a/redash/query_runner/query_results.py b/redash/query_runner/query_results.py
index 06252ab025..564a234470 100644
--- a/redash/query_runner/query_results.py
+++ b/redash/query_runner/query_results.py
@@ -153,7 +153,7 @@ def run_query(self, query, user):
                 elif columns[j]['type'] != guess:
                     columns[j]['type'] = TYPE_STRING
 
-            rows.append(dict(list(zip(column_names, row))))
+            rows.append(dict(zip(column_names, row)))
 
         data = {'columns': columns, 'rows': rows}
         error = None
diff --git a/redash/query_runner/rockset.py b/redash/query_runner/rockset.py
index de5f064b15..5d3a6a332e 100644
--- a/redash/query_runner/rockset.py
+++ b/redash/query_runner/rockset.py
@@ -79,7 +79,7 @@ def _get_tables(self, schema):
         for col in self.api.list():
             table_name = col['name']
             describe = self.api.query('DESCRIBE "{}"'.format(table_name))
-            columns = list(set([x['field'][0] for x in describe['results']]))
+            columns = list(set([result['field'][0] for result in describe['results']]))
             schema[table_name] = {'name': table_name, 'columns': columns}
         return list(schema.values())
diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py
index 40094993e9..3a2af3ab6c 100644
--- a/redash/query_runner/salesforce.py
+++ b/redash/query_runner/salesforce.py
@@ -120,7 +120,7 @@ def _build_columns(self, sf, child, parents=[]):
         child_desc = sf.__getattr__(child_type).describe()
         child_type_map = dict((f['name'], f['type'])for f in child_desc['fields'])
         columns = []
-        for key in list(child.keys()):
+        for key in child.keys():
             if key != 'attributes':
                 if isinstance(child[key], OrderedDict) and 'attributes' in child[key]:
                     columns.extend(self._build_columns(sf, child[key], parents + [key]))
diff --git a/redash/query_runner/snowflake.py b/redash/query_runner/snowflake.py
index cdbbd85712..31f5be4c98 100644
--- a/redash/query_runner/snowflake.py
+++ b/redash/query_runner/snowflake.py
@@ -92,7 +92,7 @@ def run_query(self, query, user):
             columns = self.fetch_columns(
                 [(i[0], self.determine_type(i[1], i[5])) for i in cursor.description])
-            rows = [dict(list(zip((c['name'] for c in columns), row)))
+            rows = [dict(zip((column['name'] for column in columns), row))
                     for row in cursor]
 
             data = {'columns': columns, 'rows': rows}
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py
index 0efdb25742..3881e569f6 100644
--- a/redash/query_runner/sqlite.py
+++ b/redash/query_runner/sqlite.py
@@ -66,7 +66,7 @@ def run_query(self, query, user):
             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], None) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
 
                 data = {'columns': columns, 'rows': rows}
                 error = None
diff --git a/redash/query_runner/treasuredata.py b/redash/query_runner/treasuredata.py
index 2d26048e0b..88637ec957 100644
--- a/redash/query_runner/treasuredata.py
+++ b/redash/query_runner/treasuredata.py
@@ -104,7 +104,7 @@ def run_query(self, query, user):
                 if cursor.rowcount == 0:
                     rows = []
                 else:
-                    rows = [dict(list(zip(([c['name'] for c in columns]), r))) for i, r in enumerate(cursor.fetchall())]
+                    rows = [dict(zip(([column['name'] for column in columns]), r)) for r in cursor.fetchall()]
                 data = {'columns': columns, 'rows': rows}
                 json_data = json_dumps(data)
                 error = None
diff --git a/redash/query_runner/yandex_metrica.py b/redash/query_runner/yandex_metrica.py
index f4ef64a432..3736eb1290 100644
--- a/redash/query_runner/yandex_metrica.py
+++ b/redash/query_runner/yandex_metrica.py
@@ -23,10 +23,10 @@
         'pageViewsInterval', 'pageViews', 'firstVisitYear', 'firstVisitMonth',
         'firstVisitDayOfMonth', 'firstVisitDayOfWeek', 'firstVisitMinute',
         'firstVisitDekaminute',
-    )
+    ),
 }
 
-for type_, elements in list(COLUMN_TYPES.items()):
+for type_, elements in COLUMN_TYPES.items():
     for el in elements:
         if 'first' in el:
             el = el.replace('first', 'last')
diff --git a/redash/serializers/query_result.py b/redash/serializers/query_result.py
index 0e8afcd627..5d43734406 100644
--- a/redash/serializers/query_result.py
+++ b/redash/serializers/query_result.py
@@ -41,7 +41,7 @@ def _get_column_lists(columns):
     special_types = {
         TYPE_BOOLEAN: _convert_bool,
         TYPE_DATE: rpartial(_convert_datetime, date_format),
-        TYPE_DATETIME: rpartial(_convert_datetime, datetime_format)
+        TYPE_DATETIME: rpartial(_convert_datetime, datetime_format),
     }
 
     fieldnames = []
@@ -50,7 +50,7 @@ def _get_column_lists(columns):
     for col in columns:
         fieldnames.append(col['name'])
 
-        for col_type in list(special_types.keys()):
+        for col_type in special_types.keys():
            if col['type'] == col_type:
                special_columns[col['name']] = special_types[col_type]
@@ -86,24 +86,24 @@ def serialize_query_result_to_csv(query_result):
 
 
 def serialize_query_result_to_xlsx(query_result):
-    s = io.BytesIO()
+    output = io.BytesIO()
     query_data = query_result.data
 
-    book = xlsxwriter.Workbook(s, {'constant_memory': True})
+    book = xlsxwriter.Workbook(output, {'constant_memory': True})
     sheet = book.add_worksheet("result")
 
     column_names = []
-    for (c, col) in enumerate(query_data['columns']):
+    for c, col in enumerate(query_data['columns']):
         sheet.write(0, c, col['name'])
         column_names.append(col['name'])
 
-    for (r, row) in enumerate(query_data['rows']):
-        for (c, name) in enumerate(column_names):
+    for r, row in enumerate(query_data['rows']):
+        for c, name in enumerate(column_names):
             v = row.get(name)
-            if isinstance(v, list) or isinstance(v, dict):
+            if isinstance(v, (dict, list)):
                 v = str(v)
             sheet.write(r + 1, c, v)
 
     book.close()
-    return s.getvalue()
+    return output.getvalue()
diff --git a/redash/worker.py b/redash/worker.py
index 02e067c01c..6e3e68b5fc 100644
--- a/redash/worker.py
+++ b/redash/worker.py
@@ -90,6 +90,6 @@ def add_periodic_tasks(sender, **kwargs):
     """Load all periodic tasks from extensions and add them to Celery."""
     # Populate the redash.extensions.periodic_tasks dictionary
     extensions.load_periodic_tasks(logger)
-    for params in list(extensions.periodic_tasks.values()):
+    for params in extensions.periodic_tasks.values():
         # Add it to Celery's periodic task registry, too.
         sender.add_periodic_task(**params)
diff --git a/tests/extensions/test_extensions.py b/tests/extensions/test_extensions.py
index 1c9137589f..dff1dbca6a 100644
--- a/tests/extensions/test_extensions.py
+++ b/tests/extensions/test_extensions.py
@@ -20,26 +20,26 @@ def tearDownClass(cls):
         sys.path.remove(dummy_path)
 
     def test_working_extension(self):
-        self.assertIn("working_extension", list(extensions.extensions.keys()))
+        self.assertIn("working_extension", extensions.extensions.keys())
         self.assertEqual(
             extensions.extensions.get("working_extension"), "extension loaded"
         )
 
     def test_assertive_extension(self):
-        self.assertNotIn("assertive_extension", list(extensions.extensions.keys()))
+        self.assertNotIn("assertive_extension", extensions.extensions.keys())
 
     def test_not_findable_extension(self):
-        self.assertNotIn("not_findable_extension", list(extensions.extensions.keys()))
+        self.assertNotIn("not_findable_extension", extensions.extensions.keys())
 
     def test_not_importable_extension(self):
-        self.assertNotIn("not_importable_extension", list(extensions.extensions.keys()))
+        self.assertNotIn("not_importable_extension", extensions.extensions.keys())
 
     def test_non_callable_extension(self):
-        self.assertNotIn("non_callable_extension", list(extensions.extensions.keys()))
+        self.assertNotIn("non_callable_extension", extensions.extensions.keys())
 
     def test_dummy_periodic_task(self):
         # need to load the periodic tasks manually since this isn't
         # done automatically on test suite start but only part of
         # the worker configuration
         extensions.load_periodic_tasks(logger)
-        self.assertIn("dummy_periodic_task", list(extensions.periodic_tasks.keys()))
+        self.assertIn("dummy_periodic_task", extensions.periodic_tasks.keys())
diff --git a/tests/factories.py b/tests/factories.py
index 4193b8adfa..2915cc7d4a 100644
--- a/tests/factories.py
+++ b/tests/factories.py
@@ -15,7 +15,7 @@ def _get_kwargs(self, override_kwargs):
         kwargs = self.kwargs.copy()
         kwargs.update(override_kwargs)
 
-        for key, arg in list(kwargs.items()):
+        for key, arg in kwargs.items():
             if callable(arg):
                 kwargs[key] = arg()
diff --git a/tests/handlers/test_dashboards.py b/tests/handlers/test_dashboards.py
index 1a3ea09cef..4207cb2bde 100644
--- a/tests/handlers/test_dashboards.py
+++ b/tests/handlers/test_dashboards.py
@@ -25,7 +25,7 @@ def test_returns_dashboards(self):
         rv = self.make_request('get', '/api/dashboards')
 
         assert len(rv.json['results']) == 3
-        assert set([d['id'] for d in rv.json['results']]) == set([d1.id, d2.id, d3.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([d1.id, d2.id, d3.id])
 
     def test_filters_with_tags(self):
         d1 = self.factory.create_dashboard(tags=['test'])
@@ -34,7 +34,7 @@ def test_filters_with_tags(self):
         rv = self.make_request('get', '/api/dashboards?tags=test')
 
         assert len(rv.json['results']) == 1
-        assert set([d['id'] for d in rv.json['results']]) == set([d1.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([d1.id])
 
     def test_search_term(self):
         d1 = self.factory.create_dashboard(name="Sales")
@@ -43,7 +43,7 @@ def test_search_term(self):
         rv = self.make_request('get', '/api/dashboards?q=sales')
 
         assert len(rv.json['results']) == 2
-        assert set([d['id'] for d in rv.json['results']]) == set([d1.id, d2.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([d1.id, d2.id])
 
 
 class TestDashboardResourceGet(BaseTestCase):
diff --git a/tests/handlers/test_data_sources.py b/tests/handlers/test_data_sources.py
index 4f1dbdd80d..4aa0b4fc61 100644
--- a/tests/handlers/test_data_sources.py
+++ b/tests/handlers/test_data_sources.py
@@ -31,7 +31,7 @@ def test_returns_data_sources_ordered_by_id(self):
         self.factory.create_data_source(group=self.factory.org.default_group)
         self.factory.create_data_source(group=self.factory.org.default_group)
         response = self.make_request("get", "/api/data_sources", user=self.factory.user)
-        ids = [d['id'] for d in response.json]
+        ids = [datasource['id'] for datasource in response.json]
 
         self.assertTrue(all(left <= right for left, right in pairwise(ids)))
@@ -46,7 +46,7 @@ def test_does_not_show_deprecated_types(self):
         with patch.object(PostgreSQL, 'deprecated', return_value=True):
             rv = self.make_request('get', "/api/data_sources/types", user=admin)
 
-        types = [x['type'] for x in rv.json]
+        types = [datasource_type['type'] for datasource_type in rv.json]
         self.assertNotIn('pg', types)
 
     def test_returns_403_for_non_admin(self):
diff --git a/tests/handlers/test_destinations.py b/tests/handlers/test_destinations.py
index 2d0bc0b458..6736e936b7 100644
--- a/tests/handlers/test_destinations.py
+++ b/tests/handlers/test_destinations.py
@@ -80,7 +80,7 @@ def test_post(self):
         d = NotificationDestination.query.get(d.id)
         self.assertEqual(d.name, data['name'])
         self.assertEqual(d.options['url'], data['options']['url'])
-        
+
 
 class DestinationTypesTest(BaseTestCase):
     def test_does_not_show_deprecated_types(self):
@@ -88,5 +88,5 @@ def test_does_not_show_deprecated_types(self):
         with patch.object(Slack, 'deprecated', return_value=True):
             rv = self.make_request('get', "/api/destinations/types", user=admin)
 
-        types = [x['type'] for x in rv.json]
-        self.assertNotIn('slack', types)
\ No newline at end of file
+        types = [destination_type['type'] for destination_type in rv.json]
+        self.assertNotIn('slack', types)
diff --git a/tests/handlers/test_queries.py b/tests/handlers/test_queries.py
index 562142adfa..89f39dd933 100644
--- a/tests/handlers/test_queries.py
+++ b/tests/handlers/test_queries.py
@@ -201,7 +201,7 @@ def test_returns_queries(self):
         rv = self.make_request('get', '/api/queries')
 
         assert len(rv.json['results']) == 3
-        assert set([d['id'] for d in rv.json['results']]) == set([q1.id, q2.id, q3.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([q1.id, q2.id, q3.id])
 
     def test_filters_with_tags(self):
         q1 = self.factory.create_query(tags=['test'])
@@ -210,7 +210,7 @@ def test_filters_with_tags(self):
         rv = self.make_request('get', '/api/queries?tags=test')
 
         assert len(rv.json['results']) == 1
-        assert set([d['id'] for d in rv.json['results']]) == set([q1.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([q1.id])
 
     def test_search_term(self):
         q1 = self.factory.create_query(name="Sales")
@@ -219,7 +219,7 @@ def test_search_term(self):
         rv = self.make_request('get', '/api/queries?q=sales')
 
         assert len(rv.json['results']) == 2
-        assert set([d['id'] for d in rv.json['results']]) == set([q1.id, q2.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([q1.id, q2.id])
 
 
 class TestQueryListResourcePost(BaseTestCase):
@@ -320,7 +320,7 @@ def test_returns_queries(self):
         rv = self.make_request('get', '/api/queries/archive')
 
         assert len(rv.json['results']) == 2
-        assert set([d['id'] for d in rv.json['results']]) == set([q1.id, q2.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([q1.id, q2.id])
 
     def test_search_term(self):
         q1 = self.factory.create_query(name="Sales", is_archived=True)
@@ -329,7 +329,7 @@ def test_search_term(self):
         rv = self.make_request('get', '/api/queries/archive?q=sales')
 
         assert len(rv.json['results']) == 2
-        assert set([d['id'] for d in rv.json['results']]) == set([q1.id, q2.id])
+        assert set([result['id'] for result in rv.json['results']]) == set([q1.id, q2.id])
 
 
 class QueryRefreshTest(BaseTestCase):
diff --git a/tests/handlers/test_users.py b/tests/handlers/test_users.py
index aebaad6153..0657e7eab1 100644
--- a/tests/handlers/test_users.py
+++ b/tests/handlers/test_users.py
@@ -112,7 +112,7 @@ class PlainObject(object):
 
     def make_request_and_return_ids(self, *args, **kwargs):
         rv = self.make_request(*args, **kwargs)
-        return [u['id'] for u in rv.json['results']]
+        return [user['id'] for user in rv.json['results']]
 
     def assertUsersListMatches(self, actual_ids, expected_ids, unexpected_ids):
         actual_ids = set(actual_ids)
diff --git a/tests/models/test_queries.py b/tests/models/test_queries.py
index f573d68fb6..1aaeb64dbc 100644
--- a/tests/models/test_queries.py
+++ b/tests/models/test_queries.py
@@ -428,4 +428,4 @@ def test_doesnt_update_queries_with_different_data_source(self):
 
         self.assertEqual(query1.latest_query_data, query_result)
         self.assertEqual(query2.latest_query_data, query_result)
-        self.assertNotEqual(query3.latest_query_data, query_result)
\ No newline at end of file
+        self.assertNotEqual(query3.latest_query_data, query_result)
diff --git a/tests/query_runner/test_google_spreadsheets.py b/tests/query_runner/test_google_spreadsheets.py
index 36d9f2487a..ad236b883d 100644
--- a/tests/query_runner/test_google_spreadsheets.py
+++ b/tests/query_runner/test_google_spreadsheets.py
@@ -65,7 +65,7 @@ def test_parse_worksheet_with_duplicate_column_names(self):
         worksheet = [['Column', 'Another Column', 'Column'], ['A', 'TRUE', '1'], ['B', 'FALSE', '2'], ['C', 'TRUE', '3'], ['D', 'FALSE', '4']]
         parsed = parse_worksheet(worksheet)
 
-        columns = [c['name'] for c in parsed['columns']]
+        columns = [column['name'] for column in parsed['columns']]
         self.assertEqual('Column', columns[0])
         self.assertEqual('Another Column', columns[1])
         self.assertEqual('Column1', columns[2])
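
The sketch promised in the commit message, for reference: a minimal, self-contained
illustration of the Python 3 dict-view behavior behind the list() reverts above. It
is not part of the commit itself; the config_map data merely mirrors the MySQL hunk,
and the key-renaming loop is a hypothetical example, not code from this diff.

    # Python 3 dict views: .keys()/.values()/.items() return live view objects
    # that are iterable and support membership tests, so the list() wrappers
    # 2to3 adds are redundant for read-only use like this:
    config_map = {"ssl_cacert": "ca", "ssl_cert": "cert", "ssl_key": "key"}

    for key, cfg in config_map.items():    # plain iteration: no list() needed
        print(key, cfg)

    assert "ssl_key" in config_map.keys()  # membership works on the view itself

    # A snapshot is still needed when the dict is mutated while iterating;
    # this is the one case where 2to3's caution is justified. Iterating the
    # live view here raises RuntimeError on current CPython (and can skip or
    # repeat keys on older versions):
    for key in list(config_map.keys()):
        config_map[key.upper()] = config_map.pop(key)

    # Views are not subscriptable, so indexing also requires a real list:
    print(list(config_map.keys())[0])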