Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support freq in DatetimeIndex #14593

Merged
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 52 additions & 6 deletions python/cudf/cudf/core/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
Tuple,
Type,
Union,
cast,
)

import cupy
Expand Down Expand Up @@ -1432,9 +1433,14 @@ def __repr__(self):
if self.name is not None:
lines[-1] = lines[-1] + ", name='%s'" % self.name
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
if "length" in tmp_meta:
lines[-1] = lines[-1] + ", length=%d)" % len(self)
else:
lines[-1] = lines[-1] + ")"
lines[-1] = lines[-1] + ", length=%d" % len(self)
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
if (
"freq" in tmp_meta
and isinstance(self, DatetimeIndex)
and self._freq is not None
):
lines[-1] = lines[-1] + f", freq={self._freq}"
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
lines[-1] = lines[-1] + ")"
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved

return "\n".join(lines)

Expand Down Expand Up @@ -2126,8 +2132,6 @@ def __init__(
# pandas dtindex creation first which. For now
# just make sure we handle np.datetime64 arrays
# and then just dispatch upstream
if freq is not None:
raise NotImplementedError("Freq is not yet supported")
if tz is not None:
raise NotImplementedError("tz is not yet supported")
if normalize is not False:
Expand All @@ -2141,6 +2145,8 @@ def __init__(
if yearfirst is not False:
raise NotImplementedError("yearfirst == True is not yet supported")

self._freq = _validate_freq(freq)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

While looking into adding freq support before, I found that some APIs manipulate freq (producing new values) and return new results. (I vaguely remember... but I think that happens in binops?) Should we add a TODO comment here that this is not fully functional yet and freq support needs to be added in the rest of the code-base?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, although maybe the default behaviour could be for DatetimeIndex to infer freq from its values. Then this should just work.

Also, we should probably only do that in compatibility mode for perf reasons.


valid_dtypes = tuple(
f"datetime64[{res}]" for res in ("s", "ms", "us", "ns")
)
Expand All @@ -2158,6 +2164,27 @@ def __init__(

super().__init__(data, **kwargs)

if self._freq is not None:
unique_vals = self.diff().unique()
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
if len(unique_vals) != 1 or unique_vals[0] != self._freq:
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
raise ValueError()
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved

@_cudf_nvtx_annotate
def _copy_type_metadata(
self: DatetimeIndex, other: DatetimeIndex, *, override_dtypes=None
) -> GenericIndex:
super()._copy_type_metadata(other, override_dtypes=override_dtypes)
self._freq = other._freq
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
return self

@classmethod
def _from_data(
cls, data: MutableMapping, name: Any = no_default, freq: Any = None
):
result = super()._from_data(data, name)
result._freq = _validate_freq(freq)
galipremsagar marked this conversation as resolved.
Show resolved Hide resolved
return result

def __getitem__(self, index):
value = super().__getitem__(index)
if cudf.get_option("mode.pandas_compatible") and isinstance(
Expand All @@ -2166,6 +2193,11 @@ def __getitem__(self, index):
return pd.Timestamp(value)
return value

@_cudf_nvtx_annotate
def copy(self, name=None, deep=False, dtype=None, names=None):
    """Return a copy of this index, carrying over type metadata (``_freq``)."""
    # Delegate the actual copy to the parent class, then propagate
    # DatetimeIndex-specific metadata (the freq attribute) onto the result.
    new_index = super().copy(name=name, deep=deep, dtype=dtype, names=names)
    new_index._copy_type_metadata(self)
    return new_index

def searchsorted(
self,
value,
Expand Down Expand Up @@ -2519,7 +2551,13 @@ def to_pandas(self, *, nullable: bool = False) -> pd.DatetimeIndex:
)
else:
nanos = self._values.astype("datetime64[ns]")
return pd.DatetimeIndex(nanos.to_pandas(), name=self.name)

freq = (
self._freq._maybe_as_fast_pandas_offset()
if self._freq is not None
else None
)
return pd.DatetimeIndex(nanos.to_pandas(), name=self.name, freq=freq)

@_cudf_nvtx_annotate
def _get_dt_field(self, field):
Expand Down Expand Up @@ -3624,3 +3662,11 @@ def _extended_gcd(a: int, b: int) -> Tuple[int, int, int]:
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t


def _validate_freq(freq: Any) -> cudf.DateOffset:
    """Normalize a user-supplied frequency to a ``cudf.DateOffset``.

    Strings are parsed via ``DateOffset._from_freqstr``; ``None`` and
    existing ``DateOffset`` objects pass through unchanged; anything else
    raises ``ValueError``.
    """
    if isinstance(freq, str):
        return cudf.DateOffset._from_freqstr(freq)
    if freq is None or isinstance(freq, cudf.DateOffset):
        # cast is purely for the type checker; freq is already valid here.
        return cast(cudf.DateOffset, freq)
    raise ValueError(f"Invalid frequency: {freq}")
14 changes: 8 additions & 6 deletions python/cudf/cudf/core/tools/datetimes.py
Original file line number Diff line number Diff line change
Expand Up @@ -463,13 +463,19 @@ class DateOffset:
}

_CODES_TO_UNITS = {
"N": "nanoseconds",
Copy link
Contributor

@bdice bdice Dec 7, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I have some vague recollection that we left these out on purpose... hmm. I think there was some pandas behavior for which "L" and "ms" were okay but "N", "U", "T", etc. were not supported. We'd probably be able to tell if there are any newly failing pandas tests? I'd just check to see where _CODES_TO_UNITS is used and if there are any inconsistencies with this across different APIs.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There were a bunch of failing tests without these changes, adding these units passed the cudf pytests.

There is only slight increase in pandas-pytest failures:

# This PR:
= 12094 failed, 174794 passed, 3850 skipped, 3314 xfailed, 8 xpassed, 21406 warnings, 102 errors in 1516.39s (0:25:16) =

# `branch-24.02`:
= 11607 failed, 175286 passed, 3849 skipped, 3312 xfailed, 11 xpassed, 21414 warnings, 97 errors in 1493.35s (0:24:53) =

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Sounds good, thanks for checking.

"ns": "nanoseconds",
"U": "microseconds",
"us": "microseconds",
"ms": "milliseconds",
"L": "milliseconds",
"s": "seconds",
"S": "seconds",
"m": "minutes",
"min": "minutes",
"T": "minutes",
"h": "hours",
"H": "hours",
"D": "days",
"W": "weeks",
"M": "months",
Expand All @@ -487,7 +493,7 @@ class DateOffset:
pd_offset.Nano: "nanoseconds",
}

_FREQSTR_REGEX = re.compile("([0-9]*)([a-zA-Z]+)")
_FREQSTR_REGEX = re.compile("([-+]?[0-9]*)([a-zA-Z]+)")

def __init__(self, n=1, normalize=False, **kwds):
if normalize:
Expand Down Expand Up @@ -843,10 +849,6 @@ def date_range(
arr = cp.linspace(start=start, stop=end, num=periods)
result = cudf.core.column.as_column(arr).astype("datetime64[ns]")
return cudf.DatetimeIndex._from_data({name: result})
elif cudf.get_option("mode.pandas_compatible"):
raise NotImplementedError(
"`DatetimeIndex` with `freq` cannot be constructed."
)

# The code logic below assumes `freq` is defined. It is first normalized
# into `DateOffset` for further computation with timestamps.
Expand Down Expand Up @@ -940,7 +942,7 @@ def date_range(
arr = cp.arange(start=start, stop=stop, step=step, dtype="int64")
res = cudf.core.column.as_column(arr).astype("datetime64[ns]")

return cudf.DatetimeIndex._from_data({name: res})
return cudf.DatetimeIndex._from_data({name: res}, freq=freq)


def _has_fixed_frequency(freq: DateOffset) -> bool:
Expand Down
8 changes: 8 additions & 0 deletions python/cudf/cudf/pandas/_wrappers/pandas.py
Original file line number Diff line number Diff line change
Expand Up @@ -707,6 +707,14 @@ def Index__new__(cls, *args, **kwargs):
"Resampler", cudf.core.resample._Resampler, pd_Resampler
)

# Proxy wrappers so pandas-accelerator mode transparently maps
# pd.core.resample.Resampler results onto cudf's resampler classes.
DataFrameResampler = make_intermediate_proxy_type(
    "DataFrameResampler", cudf.core.resample.DataFrameResampler, pd_Resampler
)

SeriesResampler = make_intermediate_proxy_type(
    "SeriesResampler", cudf.core.resample.SeriesResampler, pd_Resampler
)

StataReader = make_intermediate_proxy_type(
"StataReader",
_Unusable,
Expand Down
86 changes: 83 additions & 3 deletions python/cudf/cudf/tests/test_datetime.py
Original file line number Diff line number Diff line change
Expand Up @@ -1571,6 +1571,44 @@ def test_date_range_start_end_freq(request, start, end, freq):
reason="https://github.com/rapidsai/cudf/issues/12133",
)
)
request.applymarker(
pytest.mark.xfail(
condition=(
isinstance(freq, dict)
and freq.get("hours", None) == 10
and freq.get("days", None) == 57
and freq.get("nanoseconds", None) == 3
and (
(
start == "1996-11-21 04:05:30"
and end == "2000-02-13 08:41:06"
)
or (
start == "1970-01-01 00:00:00"
and end == "2000-02-13 08:41:06"
)
or (
start == "1970-01-01 00:00:00"
and end == "1996-11-21 04:05:30"
)
or (
start == "1831-05-08 15:23:21"
and end == "2000-02-13 08:41:06"
)
or (
start == "1831-05-08 15:23:21"
and end == "1996-11-21 04:05:30"
)
or (
start == "1831-05-08 15:23:21"
and end == "1970-01-01 00:00:00"
)
)
),
reason="Nanosecond offsets being dropped by pandas, which is "
"fixed in pandas-2.0+",
)
)
if isinstance(freq, str):
_gfreq = _pfreq = freq
else:
Expand All @@ -1586,7 +1624,29 @@ def test_date_range_start_end_freq(request, start, end, freq):
)


def test_date_range_start_freq_periods(start, freq, periods):
def test_date_range_start_freq_periods(request, start, freq, periods):
request.applymarker(
pytest.mark.xfail(
condition=(
isinstance(freq, dict)
and freq.get("hours", None) == 10
and freq.get("days", None) == 57
and freq.get("nanoseconds", None) == 3
and periods in (10, 100)
and (
start
in {
"2000-02-13 08:41:06",
"1996-11-21 04:05:30",
"1970-01-01 00:00:00",
"1831-05-08 15:23:21",
}
)
),
reason="Nanosecond offsets being dropped by pandas, which is "
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this better solved by fixing the condition on the parameter, which should be "pandas < 2.0"?

https://github.com/shwina/cudf/blob/ed3ba3ff17cf686d1e6e38f01073d27b1be64799/python/cudf/cudf/tests/test_datetime.py#L1512

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wanted to do that but it happens only for a few parameter combinations and we currently xpass/xfail strictly. That's the reason for the current approach.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I know we have two diverging approaches at the same place but I plan on dropping these in pandas-2.0 feature branch.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay. We can clean it up later.

"fixed in pandas-2.0+",
)
)
if isinstance(freq, str):
_gfreq = _pfreq = freq
else:
Expand All @@ -1613,6 +1673,28 @@ def test_date_range_end_freq_periods(request, end, freq, periods):
reason="https://github.com/pandas-dev/pandas/issues/46877",
)
)
request.applymarker(
pytest.mark.xfail(
condition=(
isinstance(freq, dict)
and freq.get("hours", None) == 10
and freq.get("days", None) == 57
and freq.get("nanoseconds", None) == 3
and periods in (10, 100)
and (
end
in {
"2000-02-13 08:41:06",
"1996-11-21 04:05:30",
"1970-01-01 00:00:00",
"1831-05-08 15:23:21",
}
)
),
reason="Nanosecond offsets being dropped by pandas, which is "
"fixed in pandas-2.0+",
)
)
if isinstance(freq, str):
_gfreq = _pfreq = freq
else:
Expand Down Expand Up @@ -2163,8 +2245,6 @@ def test_datetime_getitem_na():

def test_daterange_pandas_compatibility():
with cudf.option_context("mode.pandas_compatible", True):
with pytest.raises(NotImplementedError):
cudf.date_range("20010101", "20020215", freq="400h", name="times")
expected = pd.date_range(
"2010-01-01", "2010-02-01", periods=10, name="times"
)
Expand Down
Loading