Revert some changes 2to3 tends to do #4261

Merged: 2 commits, Oct 18, 2019
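A note on the pattern being reverted: 2to3 defensively wraps `.keys()`, `.values()`, and `.items()` calls in `list()`, because in Python 3 these return live views rather than lists. When the result is only iterated once and the dict is never resized inside the loop, the wrapper just builds a throwaway copy. An illustrative sketch (not taken from this diff):

```python
prefs = {'theme': 'dark', 'lang': 'en'}

# What 2to3 emits: materializes a temporary list, then iterates it.
for key, value in list(prefs.items()):
    print(key, value)

# The reverted form: iterates the view directly, no intermediate copy.
for key, value in prefs.items():
    print(key, value)
```

The hunks below apply this revert file by file; the few spots where extra care is warranted are called out inline.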
1 change: 1 addition & 0 deletions bin/release_manager.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import os
 import sys
 import re
2 changes: 1 addition & 1 deletion redash/app.py
@@ -10,7 +10,7 @@ def __init__(self, *args, **kwargs):
         kwargs.update({
             'template_folder': settings.STATIC_ASSETS_PATH,
             'static_folder': settings.STATIC_ASSETS_PATH,
-            'static_path': '/static',
+            'static_url_path': '/static',
         })
         super(Redash, self).__init__(__name__, *args, **kwargs)
         # Make sure we get the right referral address even behind proxies like nginx.
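This hunk is an API fix rather than a 2to3 revert: `static_url_path` is the current name of the Flask constructor argument, and `static_path` is its long-retired predecessor. A minimal standalone sketch of the modern call (plain Flask, not Redash's subclass):

```python
from flask import Flask

# static_url_path sets the URL prefix for static files;
# static_folder sets where they live on disk.
app = Flask(__name__, static_url_path='/static', static_folder='static')
```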
2 changes: 1 addition & 1 deletion redash/cli/rq.py
@@ -23,7 +23,7 @@ def scheduler():
 @argument('queues', nargs=-1)
 def worker(queues='default'):
     if not queues:
-        queues = ('default', )
+        queues = ('default',)
     with Connection(rq_redis_connection):
         w = Worker(queues)
         w.work()
2 changes: 1 addition & 1 deletion redash/handlers/base.py
@@ -43,7 +43,7 @@ def record_event(self, options):

     # TODO: this should probably be somewhere else
     def update_model(self, model, updates):
-        for k, v in list(updates.items()):
+        for k, v in updates.items():
             setattr(model, k, v)

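Dropping `list()` here is safe because the loop only reads `updates`; `setattr` mutates the model object, not the dict being iterated. A runnable sketch of the same shape (the `Model` stand-in is hypothetical):

```python
class Model:
    pass

def update_model(model, updates):
    # Iterating the items() view is fine: the loop never resizes `updates`.
    for k, v in updates.items():
        setattr(model, k, v)

m = Model()
update_model(m, {'name': 'weekly report', 'is_draft': False})
print(m.name, m.is_draft)  # -> weekly report False
```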
2 changes: 1 addition & 1 deletion redash/handlers/data_sources.py
@@ -19,7 +19,7 @@
 class DataSourceTypeListResource(BaseResource):
     @require_admin
     def get(self):
-        available_query_runners = [q for q in list(query_runners.values()) if not q.deprecated]
+        available_query_runners = [q for q in query_runners.values() if not q.deprecated]
         return [q.to_dict() for q in sorted(available_query_runners, key=lambda q: q.name())]

2 changes: 1 addition & 1 deletion redash/handlers/destinations.py
@@ -13,7 +13,7 @@
 class DestinationTypeListResource(BaseResource):
     @require_admin
     def get(self):
-        available_destinations = [q for q in list(destinations.values()) if not q.deprecated]
+        available_destinations = [q for q in destinations.values() if not q.deprecated]
         return [q.to_dict() for q in available_destinations]

2 changes: 1 addition & 1 deletion redash/handlers/queries.py
@@ -336,7 +336,7 @@ def post(self, query_id):
         query_def['query_text'] = query_def.pop('query')

         if 'tags' in query_def:
-            query_def['tags'] = [_f for _f in query_def['tags'] if _f]
+            query_def['tags'] = [tag for tag in query_def['tags'] if tag]

         query_def['last_modified_by'] = self.current_user
         query_def['changed_by'] = self.current_user
6 changes: 3 additions & 3 deletions redash/models/__init__.py
@@ -218,7 +218,7 @@ def groups(self):
         groups = DataSourceGroup.query.filter(
             DataSourceGroup.data_source == self
         )
-        return dict([(g.group_id, g.view_only) for g in groups])
+        return dict([(group.group_id, group.view_only) for group in groups])


 @generic_repr('id', 'data_source_id', 'group_id', 'view_only')
@@ -543,8 +543,8 @@ def past_scheduled_queries(cls):
             .filter(Query.schedule.isnot(None))
             .order_by(Query.id)
         )
-        return [x for x in queries if x.schedule["until"] is not None and pytz.utc.localize(
-            datetime.datetime.strptime(x.schedule['until'], '%Y-%m-%d')
+        return [query for query in queries if query.schedule["until"] is not None and pytz.utc.localize(
+            datetime.datetime.strptime(query.schedule['until'], '%Y-%m-%d')
         ) <= now]

     @classmethod
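The renamed loop variables read better; `dict([...])` over a list of pairs is itself a 2to3-era idiom, and a dict comprehension would express the same mapping without the intermediate list. A sketch with stand-in objects:

```python
from collections import namedtuple

DataSourceGroup = namedtuple('DataSourceGroup', ['group_id', 'view_only'])
groups = [DataSourceGroup(1, True), DataSourceGroup(2, False)]

# Equivalent to dict([(group.group_id, group.view_only) for group in groups]),
# minus the throwaway list of tuples.
by_id = {group.group_id: group.view_only for group in groups}
print(by_id)  # -> {1: True, 2: False}
```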
10 changes: 5 additions & 5 deletions redash/models/parameterized_query.py
@@ -10,9 +10,9 @@


 def _pluck_name_and_value(default_column, row):
-    row = {k.lower(): v for k, v in list(row.items())}
-    name_column = "name" if "name" in list(row.keys()) else default_column.lower()
-    value_column = "value" if "value" in list(row.keys()) else default_column.lower()
+    row = {k.lower(): v for k, v in row.items()}
+    name_column = "name" if "name" in row.keys() else default_column.lower()
+    value_column = "value" if "value" in row.keys() else default_column.lower()

     return {"name": row[name_column], "value": text_type(row[value_column])}

@@ -73,7 +73,7 @@ def _parameter_names(parameter_values):
     names = []
     for key, value in parameter_values.items():
         if isinstance(value, dict):
-            for inner_key in list(value.keys()):
+            for inner_key in value.keys():
                 names.append('{}.{}'.format(key, inner_key))
         else:
             names.append(key)
@@ -170,7 +170,7 @@ def _valid(self, name, value):

     @property
     def is_safe(self):
-        text_parameters = [p for p in self.schema if p["type"] == "text"]
+        text_parameters = [param for param in self.schema if param["type"] == "text"]
         return not any(text_parameters)

     @property
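With the `list()` gone, `"name" in row.keys()` is an O(1) membership test against the view again; plain `"name" in row` behaves identically and is the more common spelling. Illustrative sketch:

```python
row = {'Name': 'Canada', 'Value': 'ca'}
row = {k.lower(): v for k, v in row.items()}

# Both tests hit the dict's hash table directly.
assert ('name' in row.keys()) == ('name' in row)
name_column = 'name' if 'name' in row else 'country'
print(row[name_column])  # -> Canada
```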
2 changes: 1 addition & 1 deletion redash/monitor.py
@@ -103,7 +103,7 @@ def get_waiting_in_queue(queue_name):
 def parse_tasks(task_lists, state):
     rows = []

-    for task in itertools.chain(*list(task_lists.values())):
+    for task in itertools.chain(*task_lists.values()):
         task_row = {
             'state': state,
             'task_name': task['name'],
2 changes: 1 addition & 1 deletion redash/query_runner/__init__.py
@@ -148,7 +148,7 @@ def _get_tables(self, schema_dict):
         return []

     def _get_tables_stats(self, tables_dict):
-        for t in list(tables_dict.keys()):
+        for t in tables_dict.keys():
             if type(tables_dict[t]) == dict:
                 res = self._run_query_internal('select count(*) as cnt from %s' % t)
                 tables_dict[t]['size'] = res[0]['cnt']
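This loop does mutate `tables_dict` while iterating its keys, but only by assigning into existing nested values; the key set itself never changes, and only key insertions or deletions invalidate a live view. Sketch:

```python
tables_dict = {'events': {}, 'users': {}}

# Writing into existing values is fine mid-iteration; the key set of
# tables_dict never grows or shrinks.
for t in tables_dict.keys():
    tables_dict[t]['size'] = 0
print(tables_dict)  # -> {'events': {'size': 0}, 'users': {'size': 0}}
```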
2 changes: 1 addition & 1 deletion redash/query_runner/athena.py
@@ -223,7 +223,7 @@ def run_query(self, query, user):
             cursor.execute(query)
             column_tuples = [(i[0], _TYPE_MAPPINGS.get(i[1], None)) for i in cursor.description]
             columns = self.fetch_columns(column_tuples)
-            rows = [dict(list(zip(([c['name'] for c in columns]), r))) for i, r in enumerate(cursor.fetchall())]
+            rows = [dict(zip(([c['name'] for c in columns]), r)) for i, r in enumerate(cursor.fetchall())]
             qbytes = None
             athena_query_id = None
             try:
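This `dict(zip(column_names, row))` shape recurs in most of the query runners below: `zip` pairs each column name with the positionally matching value, and `dict()` consumes the lazy iterator directly, so the extra `list()` bought nothing. (The `enumerate` here looks like leftover too; its index `i` is never used.) Minimal sketch:

```python
columns = [{'name': 'id'}, {'name': 'state'}]
fetched = [(1, 'done'), (2, 'queued')]

# dict() accepts any iterable of key/value pairs, including a lazy zip.
rows = [dict(zip([c['name'] for c in columns], row)) for row in fetched]
print(rows)  # -> [{'id': 1, 'state': 'done'}, {'id': 2, 'state': 'queued'}]
```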
2 changes: 1 addition & 1 deletion redash/query_runner/axibase_tsd.py
@@ -69,7 +69,7 @@ def generate_rows_and_columns(csv_response):
                 'name': i['name']}
                for i in meta_columns]
     column_names = [c['name'] for c in columns]
-    rows = [dict(list(zip(column_names, row))) for row in reader]
+    rows = [dict(zip(column_names, row)) for row in reader]
     return columns, rows

2 changes: 1 addition & 1 deletion redash/query_runner/azure_kusto.py
@@ -137,7 +137,7 @@ def get_schema(self, get_stats=False):
         results = json_loads(results)

         schema_as_json = json_loads(results['rows'][0]['DatabaseSchema'])
-        tables_list = list(schema_as_json['Databases'][self.configuration['database']]['Tables'].values())
+        tables_list = schema_as_json['Databases'][self.configuration['database']]['Tables'].values()

         schema = {}

2 changes: 1 addition & 1 deletion redash/query_runner/cass.py
@@ -128,7 +128,7 @@ def run_query(self, query, user):

         columns = self.fetch_columns([(c, 'string') for c in column_names])

-        rows = [dict(list(zip(column_names, row))) for row in result]
+        rows = [dict(zip(column_names, row)) for row in result]

         data = {'columns': columns, 'rows': rows}
         json_data = json_dumps(data, cls=CassandraJSONEncoder)
2 changes: 1 addition & 1 deletion redash/query_runner/db2.py
@@ -123,7 +123,7 @@ def run_query(self, query, user):

             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]

                 data = {'columns': columns, 'rows': rows}
                 error = None
4 changes: 2 additions & 2 deletions redash/query_runner/dgraph.py
@@ -19,7 +19,7 @@ def reduce_item(reduced_item, key, value):

     # Reduction Condition 2
     elif type(value) is dict:
-        sub_keys = list(value.keys())
+        sub_keys = value.keys()
         for sub_key in sub_keys:
             reduce_item(reduced_item, '{}.{}'.format(key, sub_key), value[sub_key])

@@ -105,7 +105,7 @@ def run_query(self, query, user):
                 reduced_item = {}
                 reduce_item(reduced_item, first_key, item)

-                header += list(reduced_item.keys())
+                header += reduced_item.keys()

                 processed_data.append(reduced_item)

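`header += reduced_item.keys()` keeps working without `list()` because `+=` on a list is an extend, and extend accepts any iterable, dict views included. Sketch:

```python
header = ['uid']
reduced_item = {'name': 'alice', 'age': 30}

# list += iterable is list.extend(iterable); the keys view qualifies.
header += reduced_item.keys()
print(header)  # -> ['uid', 'name', 'age']
```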
2 changes: 1 addition & 1 deletion redash/query_runner/drill.py
@@ -120,7 +120,7 @@ def get_schema(self, get_stats=False):
         """
         allowed_schemas = self.configuration.get('allowed_schemas')
         if allowed_schemas:
-            query += "and TABLE_SCHEMA in ({})".format(', '.join(["'{}'".format(re.sub('[^a-zA-Z0-9_.`]', '', x)) for x in allowed_schemas.split(',')]))
+            query += "and TABLE_SCHEMA in ({})".format(', '.join(["'{}'".format(re.sub('[^a-zA-Z0-9_.`]', '', allowed_schema)) for allowed_schema in allowed_schemas.split(',')]))

         results, error = self.run_query(query, None)

2 changes: 1 addition & 1 deletion redash/query_runner/druid.py
@@ -55,7 +55,7 @@ def run_query(self, query, user):
         try:
             cursor.execute(query)
             columns = self.fetch_columns([(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description])
-            rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+            rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]

             data = {'columns': columns, 'rows': rows}
             error = None
6 changes: 3 additions & 3 deletions redash/query_runner/elasticsearch.py
@@ -151,7 +151,7 @@ def parse_doc(doc, path=None):
         '''
         path = path or []
         result = []
-        for field, description in list(doc['properties'].items()):
+        for field, description in doc['properties'].items():
             if 'properties' in description:
                 result.extend(parse_doc(description, path + [field]))
             else:
@@ -166,10 +166,10 @@ def parse_doc(doc, path=None):
         # make a schema for each index
         # the index contains a mappings dict with documents
         # in a hierarchical format
-        for name, index in list(mappings.items()):
+        for name, index in mappings.items():
             columns = []
             schema[name] = {'name': name}
-            for doc, items in list(index['mappings'].items()):
+            for doc, items in index['mappings'].items():
                 columns.extend(parse_doc(items))

             # remove duplicates
2 changes: 1 addition & 1 deletion redash/query_runner/google_analytics.py
@@ -145,7 +145,7 @@ def run_query(self, query, user):
             params = json_loads(query)
         except:
             params = parse_qs(urlparse(query).query, keep_blank_values=True)
-            for key in list(params.keys()):
+            for key in params.keys():
                 params[key] = ','.join(params[key])
                 if '-' in key:
                     params[key.replace('-', '_')] = params.pop(key)
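One caution on this hunk: unlike the read-only loops elsewhere in this PR, this one calls `params.pop(key)` and inserts a renamed key while the `.keys()` view is being iterated. Changing a dict's key set during view iteration raises `RuntimeError` on current CPython, so the 2to3 `list()` snapshot was arguably load-bearing here whenever a hyphenated key exists. A minimal reproduction next to the safe snapshot form:

```python
params = {'start-date': ['2019-10-01'], 'end-date': ['2019-10-31']}

try:
    for key in params.keys():  # live view over the dict's keys
        if '-' in key:
            # pop + insert changes the key set mid-iteration
            params[key.replace('-', '_')] = params.pop(key)
except RuntimeError as exc:
    # Current CPython raises ("dictionary keys changed during iteration");
    # some older 3.x releases silently continue instead.
    print(exc)

# Safe variant, as the pre-revert code had it: snapshot the keys first.
params = {'start-date': ['2019-10-01'], 'end-date': ['2019-10-31']}
for key in list(params.keys()):
    if '-' in key:
        params[key.replace('-', '_')] = params.pop(key)
print(params)  # -> {'start_date': ['2019-10-01'], 'end_date': ['2019-10-31']}
```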
4 changes: 2 additions & 2 deletions redash/query_runner/google_spreadsheets.py
@@ -50,7 +50,7 @@ def _get_columns_and_column_names(row):

 def _value_eval_list(row_values, col_types):
     value_list = []
-    raw_values = list(zip(col_types, row_values))
+    raw_values = zip(col_types, row_values)
     for typ, rval in raw_values:
         try:
             if rval is None or rval == '':
@@ -100,7 +100,7 @@ def parse_worksheet(worksheet):
             columns[j]['type'] = guess_type(value)

     column_types = [c['type'] for c in columns]
-    rows = [dict(list(zip(column_names, _value_eval_list(row, column_types)))) for row in worksheet[HEADER_INDEX + 1:]]
+    rows = [dict(zip(column_names, _value_eval_list(row, column_types))) for row in worksheet[HEADER_INDEX + 1:]]
     data = {'columns': columns, 'rows': rows}

     return data
2 changes: 1 addition & 1 deletion redash/query_runner/hive_ds.py
@@ -120,7 +120,7 @@ def run_query(self, query, user):
                     'type': types_map.get(column[COLUMN_TYPE], None)
                 })

-            rows = [dict(list(zip(column_names, row))) for row in cursor]
+            rows = [dict(zip(column_names, row)) for row in cursor]

             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
2 changes: 1 addition & 1 deletion redash/query_runner/impala_ds.py
@@ -117,7 +117,7 @@ def run_query(self, query, user):
                     'type': types_map.get(column[COLUMN_TYPE], None)
                 })

-            rows = [dict(list(zip(column_names, row))) for row in cursor]
+            rows = [dict(zip(column_names, row)) for row in cursor]

             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
2 changes: 1 addition & 1 deletion redash/query_runner/influx_db.py
@@ -23,7 +23,7 @@ def _transform_result(results):
             if column not in result_columns:
                 result_columns.append(column)
         tags = series.get('tags', {})
-        for key in list(tags.keys()):
+        for key in tags.keys():
             if key not in result_columns:
                 result_columns.append(key)

2 changes: 1 addition & 1 deletion redash/query_runner/jql.py
@@ -12,7 +12,7 @@ def __init__(self):
         self.rows = []

     def add_row(self, row):
-        for key in list(row.keys()):
+        for key in row.keys():
             self.add_column(key)

         self.rows.append(row)
2 changes: 1 addition & 1 deletion redash/query_runner/kylin.py
@@ -132,7 +132,7 @@ def get_columns(self, colmetas):

     def get_rows(self, columns, results):
         return [
-            dict(list(zip((c['name'] for c in columns), row)))
+            dict(zip((column['name'] for column in columns), row))
             for row in results
         ]

2 changes: 1 addition & 1 deletion redash/query_runner/mapd.py
@@ -82,7 +82,7 @@ def run_query(self, query, user):
         try:
             cursor.execute(query)
             columns = self.fetch_columns([(i[0], TYPES_MAP.get(i[1], None)) for i in cursor.description])
-            rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+            rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
             data = {'columns': columns, 'rows': rows}
             error = None
             json_data = json_dumps(data)
4 changes: 2 additions & 2 deletions redash/query_runner/memsql_ds.py
@@ -107,13 +107,13 @@ def run_query(self, query, user):
             #     'type': types_map.get(column[COLUMN_TYPE], None)
             # })

-            rows = [dict(list(zip(list(row.keys()), list(row.values())))) for row in res]
+            rows = [dict(zip(row.keys(), row.values())) for row in res]

             # ====================================================================================================
             # temporary - until https://github.com/memsql/memsql-python/pull/8 gets merged
             # ====================================================================================================
             columns = []
-            column_names = list(rows[0].keys()) if rows else None
+            column_names = rows[0].keys() if rows else None

             if column_names:
                 for column in column_names:
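The de-duplicated `list()` calls help, though `dict(zip(row.keys(), row.values()))` is still a long way to spell `dict(row)`: Python guarantees that `.keys()` and `.values()` iterate in corresponding order, and `dict()` copies a mapping directly. Sketch:

```python
row = {'id': 7, 'state': 'done'}

# All three build an equal, independent copy of the row.
assert dict(zip(row.keys(), row.values())) == dict(row) == row.copy()
```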
2 changes: 1 addition & 1 deletion redash/query_runner/mssql.py
@@ -137,7 +137,7 @@ def run_query(self, query, user):

             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in data]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in data]

                 data = {'columns': columns, 'rows': rows}
                 json_data = json_dumps(data)
2 changes: 1 addition & 1 deletion redash/query_runner/mssql_odbc.py
@@ -126,7 +126,7 @@ def run_query(self, query, user):

             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in data]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in data]

                 data = {'columns': columns, 'rows': rows}
                 json_data = json_dumps(data)
10 changes: 7 additions & 3 deletions redash/query_runner/mysql.py
@@ -197,7 +197,7 @@ def _run_query(self, query, user, connection, r, ev):
                 columns = self.fetch_columns([(i[0], types_map.get(i[1], None))
                                               for i in desc])
                 rows = [
-                    dict(list(zip((c['name'] for c in columns), row)))
+                    dict(zip((column['name'] for column in columns), row))
                     for row in data
                 ]

@@ -226,8 +226,12 @@ def _get_ssl_parameters(self):
         ssl_params = {}

         if self.configuration.get('use_ssl'):
-            config_map = dict(ssl_cacert='ca', ssl_cert='cert', ssl_key='key')
-            for key, cfg in list(config_map.items()):
+            config_map = {
+                "ssl_cacert": "ca",
+                "ssl_cert": "cert",
+                "ssl_key": "key",
+            }
+            for key, cfg in config_map.items():
                 val = self.configuration.get(key)
                 if val:
                     ssl_params[cfg] = val
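Besides dropping `list()`, this hunk trades `dict(ssl_cacert='ca', ...)` for a literal, which keeps the configuration keys as visibly quoted strings. A standalone sketch of the same translation step (the configuration values are hypothetical):

```python
configuration = {'use_ssl': True, 'ssl_cacert': '/etc/ssl/ca.pem', 'ssl_key': ''}

config_map = {
    'ssl_cacert': 'ca',
    'ssl_cert': 'cert',
    'ssl_key': 'key',
}

# Copy over only the options that are actually set; empty values are skipped.
ssl_params = {}
for key, cfg in config_map.items():
    val = configuration.get(key)
    if val:
        ssl_params[cfg] = val
print(ssl_params)  # -> {'ca': '/etc/ssl/ca.pem'}
```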
2 changes: 1 addition & 1 deletion redash/query_runner/oracle.py
@@ -143,7 +143,7 @@ def run_query(self, query, user):
             rows_count = cursor.rowcount
             if cursor.description is not None:
                 columns = self.fetch_columns([(i[0], Oracle.get_col_type(i[1], i[5])) for i in cursor.description])
-                rows = [dict(list(zip((c['name'] for c in columns), row))) for row in cursor]
+                rows = [dict(zip((column['name'] for column in columns), row)) for row in cursor]
                 data = {'columns': columns, 'rows': rows}
                 error = None
                 json_data = json_dumps(data)
2 changes: 1 addition & 1 deletion redash/query_runner/pg.py
@@ -214,7 +214,7 @@ def run_query(self, query, user):
             columns = self.fetch_columns([(i[0], types_map.get(i[1], None))
                                           for i in cursor.description])
             rows = [
-                dict(list(zip((c['name'] for c in columns), row)))
+                dict(zip((column['name'] for column in columns), row))
                 for row in cursor
             ]

2 changes: 1 addition & 1 deletion redash/query_runner/phoenix.py
@@ -99,7 +99,7 @@ def run_query(self, query, user):
             cursor.execute(query)
             column_tuples = [(i[0], TYPES_MAPPING.get(i[1], None)) for i in cursor.description]
             columns = self.fetch_columns(column_tuples)
-            rows = [dict(list(zip(([c['name'] for c in columns]), r))) for i, r in enumerate(cursor.fetchall())]
+            rows = [dict(zip(([column['name'] for column in columns]), r)) for i, r in enumerate(cursor.fetchall())]
             data = {'columns': columns, 'rows': rows}
             json_data = json_dumps(data)
             error = None