TST/PKG: Removed pandas.util.testing.slow definition (#16852)
TomAugspurger authored and jreback committed Jul 12, 2017
1 parent a9421af commit 9d13227
Showing 19 changed files with 239 additions and 249 deletions.
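Every file in this commit applies the same mechanical substitution: the ``slow`` decorator that ``pandas.util.testing`` used to expose is dropped in favor of pytest's own marker. A minimal sketch of the before/after pattern, with a hypothetical test name and body:

import pytest

# Before this commit, the decorator came from pandas.util.testing:
#     from pandas.util.testing import slow
#     @slow
#     def test_expensive_roundtrip(): ...
#
# After this commit, the pytest marker is applied directly and no pandas
# import is needed for the decoration.
@pytest.mark.slow
def test_expensive_roundtrip():
    # hypothetical long-running check
    assert sum(range(10 ** 6)) == (10 ** 6 - 1) * 10 ** 6 // 2

Tests marked this way behave exactly as before in a normal run; the marker only matters when a run filters on it (see the note after the diffs).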
1 change: 1 addition & 0 deletions doc/source/whatsnew/v0.21.0.txt
@@ -101,6 +101,7 @@ the target. Now, a ``ValueError`` will be raised when such an input is passed in
- :class:`pandas.HDFStore`'s string representation is now faster and less detailed. For the previous behavior, use ``pandas.HDFStore.info()``. (:issue:`16503`).
- Compression defaults in HDF stores now follow pytable standards. Default is no compression and if ``complib`` is missing and ``complevel`` > 0 ``zlib`` is used (:issue:`15943`)
- ``Index.get_indexer_non_unique()`` now returns a ndarray indexer rather than an ``Index``; this is consistent with ``Index.get_indexer()`` (:issue:`16819`)
- Removed the ``@slow`` decorator from ``pandas.util.testing``, which caused issues for some downstream packages' test suites. Use ``@pytest.mark.slow`` instead, which achieves the same thing (:issue:`16850`)

.. _whatsnew_0210.api:

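The whatsnew entry above points out that the removal affects downstream packages whose test suites imported ``slow`` from ``pandas.util.testing``. Because the entry describes ``@pytest.mark.slow`` as achieving the same thing, a downstream project that still wants a ``slow`` name can define the alias locally instead of importing it from pandas; a sketch with a hypothetical module and test:

import pytest

# Hypothetical downstream helper (e.g. mypkg/tests/util.py): local stand-in
# for the removed pandas.util.testing.slow alias.
slow = pytest.mark.slow


@slow
def test_downstream_heavy_case():
    # hypothetical downstream test body
    assert list(range(3)) == [0, 1, 2]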
16 changes: 8 additions & 8 deletions pandas/tests/computation/test_eval.py
@@ -30,7 +30,7 @@
import pandas.util.testing as tm
from pandas.util.testing import (assert_frame_equal, randbool,
assert_numpy_array_equal, assert_series_equal,
assert_produces_warning, slow)
assert_produces_warning)
from pandas.compat import PY3, reduce

_series_frame_incompatible = _bool_ops_syms
@@ -144,7 +144,7 @@ def teardown_method(self, method):
del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses
del self.pandas_rhses, self.pandas_lhses, self.current_engines

@slow
@pytest.mark.slow
def test_complex_cmp_ops(self):
cmp_ops = ('!=', '==', '<=', '>=', '<', '>')
cmp2_ops = ('>', '<')
@@ -161,7 +161,7 @@ def test_simple_cmp_ops(self):
for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops):
self.check_simple_cmp_op(lhs, cmp_op, rhs)

@slow
@pytest.mark.slow
def test_binary_arith_ops(self):
for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses):
self.check_binary_arith_op(lhs, op, rhs)
@@ -181,17 +181,17 @@ def test_pow(self):
for lhs, rhs in product(self.lhses, self.rhses):
self.check_pow(lhs, '**', rhs)

@slow
@pytest.mark.slow
def test_single_invert_op(self):
for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
self.check_single_invert_op(lhs, op, rhs)

@slow
@pytest.mark.slow
def test_compound_invert_op(self):
for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
self.check_compound_invert_op(lhs, op, rhs)

@slow
@pytest.mark.slow
def test_chained_cmp_op(self):
mids = self.lhses
cmp_ops = '<', '>'
@@ -870,7 +870,7 @@ def test_frame_comparison(self, engine, parser):
res = pd.eval('df < df3', engine=engine, parser=parser)
assert_frame_equal(res, df < df3)

@slow
@pytest.mark.slow
def test_medium_complex_frame_alignment(self, engine, parser):
args = product(self.lhs_index_types, self.index_types,
self.index_types, self.index_types)
@@ -974,7 +974,7 @@ def test_series_frame_commutativity(self, engine, parser):
if engine == 'numexpr':
assert_frame_equal(a, b)

@slow
@pytest.mark.slow
def test_complex_series_frame_alignment(self, engine, parser):
import random
args = product(self.lhs_index_types, self.index_types,
5 changes: 3 additions & 2 deletions pandas/tests/frame/test_repr_info.py
@@ -8,6 +8,7 @@

from numpy import nan
import numpy as np
import pytest

from pandas import (DataFrame, compat, option_context)
from pandas.compat import StringIO, lrange, u
@@ -40,7 +41,7 @@ def test_repr_mixed(self):
foo = repr(self.mixed_frame) # noqa
self.mixed_frame.info(verbose=False, buf=buf)

@tm.slow
@pytest.mark.slow
def test_repr_mixed_big(self):
# big mixed
biggie = DataFrame({'A': np.random.randn(200),
@@ -87,7 +88,7 @@ def test_repr_dimensions(self):
with option_context('display.show_dimensions', 'truncate'):
assert "2 rows x 2 columns" not in repr(df)

@tm.slow
@pytest.mark.slow
def test_repr_big(self):
# big one
biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),
8 changes: 4 additions & 4 deletions pandas/tests/frame/test_to_csv.py
@@ -17,7 +17,7 @@
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal,
ensure_clean, slow,
ensure_clean,
makeCustomDataframe as mkdf)
import pandas.util.testing as tm

@@ -205,7 +205,7 @@ def _check_df(df, cols=None):
cols = ['b', 'a']
_check_df(df, cols)

@slow
@pytest.mark.slow
def test_to_csv_dtnat(self):
# GH3437
from pandas import NaT
@@ -236,7 +236,7 @@ def make_dtnat_arr(n, nnat=None):
assert_frame_equal(df, recons, check_names=False,
check_less_precise=True)

@slow
@pytest.mark.slow
def test_to_csv_moar(self):

def _do_test(df, r_dtype=None, c_dtype=None,
@@ -728,7 +728,7 @@ def test_to_csv_chunking(self):
rs = read_csv(filename, index_col=0)
assert_frame_equal(rs, aa)

@slow
@pytest.mark.slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
7 changes: 4 additions & 3 deletions pandas/tests/indexing/test_indexing_slow.py
@@ -6,11 +6,12 @@
import pandas as pd
from pandas.core.api import Series, DataFrame, MultiIndex
import pandas.util.testing as tm
import pytest


class TestIndexingSlow(object):

@tm.slow
@pytest.mark.slow
def test_multiindex_get_loc(self): # GH7724, GH2646

with warnings.catch_warnings(record=True):
@@ -80,15 +81,15 @@ def loop(mi, df, keys):
assert not mi.index.lexsort_depth < i
loop(mi, df, keys)

@tm.slow
@pytest.mark.slow
def test_large_dataframe_indexing(self):
# GH10692
result = DataFrame({'x': range(10 ** 6)}, dtype='int64')
result.loc[len(result)] = len(result) + 1
expected = DataFrame({'x': range(10 ** 6 + 1)}, dtype='int64')
tm.assert_frame_equal(result, expected)

@tm.slow
@pytest.mark.slow
def test_large_mi_dataframe_indexing(self):
# GH10645
result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
2 changes: 1 addition & 1 deletion pandas/tests/io/parser/common.py
@@ -664,7 +664,7 @@ def test_url(self):
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing

@tm.slow
@pytest.mark.slow
def test_file(self):
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salaries.csv')
2 changes: 1 addition & 1 deletion pandas/tests/io/test_excel.py
@@ -614,7 +614,7 @@ def test_read_from_s3_url(self):
local_table = self.get_exceldf('test1')
tm.assert_frame_equal(url_table, local_table)

@tm.slow
@pytest.mark.slow
def test_read_from_file_url(self):

# FILE
36 changes: 18 additions & 18 deletions pandas/tests/io/test_html.py
@@ -130,7 +130,7 @@ def test_spam_url(self):

assert_framelist_equal(df1, df2)

@tm.slow
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(self.banklist_data, '.*Florida.*',
attrs={'id': 'table'})
@@ -292,7 +292,7 @@ def test_invalid_url(self):
except ValueError as e:
assert str(e) == 'No tables found'

@tm.slow
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url), 'First',
@@ -301,7 +301,7 @@ def test_file_url(self):
for df in dfs:
assert isinstance(df, DataFrame)

@tm.slow
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with tm.assert_raises_regex(ValueError, 'No tables found'):
@@ -312,39 +312,39 @@ def _bank_data(self, *args, **kwargs):
return self.read_html(self.banklist_data, 'Metcalf',
attrs={'id': 'table'}, *args, **kwargs)

@tm.slow
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)

@tm.slow
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)

@tm.slow
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)

@tm.slow
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1, tupleize_cols=True)[0]
assert isinstance(df.columns, Index)

@tm.slow
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)

@tm.slow
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)

@tm.slow
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(file_path_to_url(url),
@@ -372,7 +372,7 @@ def test_python_docs_table(self):
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(['Repo', 'What'])

@tm.slow
@pytest.mark.slow
def test_thousands_macau_stats(self):
all_non_nan_table_index = -2
macau_data = os.path.join(DATA_PATH, 'macau.html')
@@ -382,7 +382,7 @@

assert not any(s.isnull().any() for _, s in df.iteritems())

@tm.slow
@pytest.mark.slow
def test_thousands_macau_index_col(self):
all_non_nan_table_index = -2
macau_data = os.path.join(DATA_PATH, 'macau.html')
@@ -523,7 +523,7 @@ def test_nyse_wsj_commas_table(self):
assert df.shape[0] == nrows
tm.assert_index_equal(df.columns, columns)

@tm.slow
@pytest.mark.slow
def test_banklist_header(self):
from pandas.io.html import _remove_whitespace

@@ -562,7 +562,7 @@ def try_remove_ws(x):
coerce=True)
tm.assert_frame_equal(converted, gtnew)

@tm.slow
@pytest.mark.slow
def test_gold_canyon(self):
gc = 'Gold Canyon'
with open(self.banklist_data, 'r') as f:
@@ -855,7 +855,7 @@ def test_works_on_valid_markup(self):
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)

@tm.slow
@pytest.mark.slow
def test_fallback_success(self):
_skip_if_none_of(('bs4', 'html5lib'))
banklist_data = os.path.join(DATA_PATH, 'banklist.html')
@@ -898,7 +898,7 @@ def get_elements_from_file(url, element='table'):
return soup.find_all(element)


@tm.slow
@pytest.mark.slow
def test_bs4_finds_tables():
filepath = os.path.join(DATA_PATH, "spam.html")
with warnings.catch_warnings():
@@ -913,13 +913,13 @@ def get_lxml_elements(url, element):
return doc.xpath('.//{0}'.format(element))


@tm.slow
@pytest.mark.slow
def test_lxml_finds_tables():
filepath = os.path.join(DATA_PATH, "spam.html")
assert get_lxml_elements(filepath, 'table')


@tm.slow
@pytest.mark.slow
def test_lxml_finds_tbody():
filepath = os.path.join(DATA_PATH, "spam.html")
assert get_lxml_elements(filepath, 'tbody')
(Diff truncated: the remaining changed files, out of the 19 listed above, are not shown here.)
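A practical note that applies to every module touched above: ``slow`` is a custom pytest marker, so these tests still run by default and are selected or excluded with pytest's ``-m`` option, e.g. ``pytest -m slow`` or ``pytest -m "not slow"``. Newer pytest versions warn about (or, with ``--strict-markers``, reject) unregistered markers, so a project adopting this pattern can register the marker in a conftest.py; a sketch assuming no other registration mechanism is in place:

# conftest.py (hypothetical project-level configuration)
def pytest_configure(config):
    # Register the custom marker so strict runs accept it and plain runs
    # do not emit unknown-marker warnings.
    config.addinivalue_line("markers", "slow: mark a test as slow to run")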
