diff --git a/doc/whats-new.rst b/doc/whats-new.rst index 7bfe5991b78..1c838813f6b 100644 --- a/doc/whats-new.rst +++ b/doc/whats-new.rst @@ -38,6 +38,9 @@ Enhancements - Use ``pandas.Grouper`` class in xarray resample methods rather than the deprecated ``pandas.TimeGrouper`` class (:issue:`1766`). By `Joe Hamman `_. +- Support for using `Zarr`_ as storage layer for xarray. (:issue:`1223`). + By `Ryan Abernathey `_ and + `Joe Hamman `_. - Support for using `Zarr`_ as storage layer for xarray. By `Ryan Abernathey `_. - :func:`xarray.plot.imshow` now handles RGB and RGBA images. diff --git a/xarray/backends/common.py b/xarray/backends/common.py index 83753ced8f5..157ee494067 100644 --- a/xarray/backends/common.py +++ b/xarray/backends/common.py @@ -6,7 +6,7 @@ import time import traceback import contextlib -from collections import Mapping +from collections import Mapping, OrderedDict import warnings from ..conventions import cf_encoder @@ -96,6 +96,9 @@ def __getitem__(self, key): def __len__(self): return len(self.variables) + def get_dimensions(self): # pragma: no cover + raise NotImplementedError + def get_attrs(self): # pragma: no cover raise NotImplementedError @@ -195,6 +198,37 @@ def __init__(self, writer=None): writer = ArrayWriter() self.writer = writer + def encode(self, variables, attributes): + """ + Encode the variables and attributes in this store + + Parameters + ---------- + variables : dict-like + Dictionary of key/value (variable name / xr.Variable) pairs + attributes : dict-like + Dictionary of key/value (attribute name / attribute) pairs + + Returns + ------- + variables : dict-like + attributes : dict-like + + """ + variables = OrderedDict([(k, self.encode_variable(v)) + for k, v in variables.items()]) + attributes = OrderedDict([(k, self.encode_attribute(v)) + for k, v in attributes.items()]) + return variables, attributes + + def encode_variable(self, v): + """encode one variable""" + return v + + def encode_attribute(self, a): + """encode one attribute""" + return a + def set_dimension(self, d, l): # pragma: no cover raise NotImplementedError @@ -208,24 +242,74 @@ def sync(self): self.writer.sync() def store_dataset(self, dataset): - # in stores variables are all variables AND coordinates - # in xarray.Dataset variables are variables NOT coordinates, - # so here we pass the whole dataset in instead of doing - # dataset.variables + """ + in stores, variables are all variables AND coordinates + in xarray.Dataset variables are variables NOT coordinates, + so here we pass the whole dataset in instead of doing + dataset.variables + """ self.store(dataset, dataset.attrs) def store(self, variables, attributes, check_encoding_set=frozenset(), unlimited_dims=None): + """ + Top level method for putting data on this store, this method: + - encodes variables/attributes + - sets dimensions + - sets variables + + Parameters + ---------- + variables : dict-like + Dictionary of key/value (variable name / xr.Variable) pairs + attributes : dict-like + Dictionary of key/value (attribute name / attribute) pairs + check_encoding_set : list-like + List of variables that should be checked for invalid encoding + values + unlimited_dims : list-like + List of dimension names that should be treated as unlimited + dimensions. 
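+
+        Notes
+        -----
+        A rough sketch of the sequence this method performs (it simply
+        mirrors the body below)::
+
+            variables, attributes = self.encode(variables, attributes)
+            self.set_attributes(attributes)
+            self.set_dimensions(variables, unlimited_dims=unlimited_dims)
+            self.set_variables(variables, check_encoding_set,
+                               unlimited_dims=unlimited_dims)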
+ """ + + variables, attributes = self.encode(variables, attributes) + self.set_attributes(attributes) + self.set_dimensions(variables, unlimited_dims=unlimited_dims) self.set_variables(variables, check_encoding_set, unlimited_dims=unlimited_dims) def set_attributes(self, attributes): + """ + This provides a centralized method to set the dataset attributes on the + data store. + + Parameters + ---------- + attributes : dict-like + Dictionary of key/value (attribute name / attribute) pairs + """ for k, v in iteritems(attributes): self.set_attribute(k, v) def set_variables(self, variables, check_encoding_set, unlimited_dims=None): + """ + This provides a centralized method to set the variables on the data + store. + + Parameters + ---------- + variables : dict-like + Dictionary of key/value (variable name / xr.Variable) pairs + check_encoding_set : list-like + List of variables that should be checked for invalid encoding + values + unlimited_dims : list-like + List of dimension names that should be treated as unlimited + dimensions. + """ + for vn, v in iteritems(variables): name = _encode_variable_name(vn) check = vn in check_encoding_set @@ -234,24 +318,51 @@ def set_variables(self, variables, check_encoding_set, self.writer.add(source, target) - def set_necessary_dimensions(self, variable, unlimited_dims=None): + def set_dimensions(self, variables, unlimited_dims=None): + """ + This provides a centralized method to set the dimensions on the data + store. + + Parameters + ---------- + variables : dict-like + Dictionary of key/value (variable name / xr.Variable) pairs + unlimited_dims : list-like + List of dimension names that should be treated as unlimited + dimensions. + """ if unlimited_dims is None: unlimited_dims = set() - dims = self.get_dimensions() - for d, l in zip(variable.dims, variable.shape): - if d not in dims: - is_unlimited = d in unlimited_dims - self.set_dimension(d, l, is_unlimited) + + existing_dims = self.get_dimensions() + + dims = OrderedDict() + for v in unlimited_dims: # put unlimited_dims first + dims[v] = None + for v in variables.values(): + dims.update(dict(zip(v.dims, v.shape))) + + for dim, length in dims.items(): + if dim in existing_dims and length != existing_dims[dim]: + raise ValueError( + "Unable to update size for existing dimension" + "%r (%d != %d)" % (dim, length, existing_dims[dim])) + elif dim not in existing_dims: + is_unlimited = dim in unlimited_dims + self.set_dimension(dim, length, is_unlimited) class WritableCFDataStore(AbstractWritableDataStore): - def store(self, variables, attributes, *args, **kwargs): + def encode(self, variables, attributes): # All NetCDF files get CF encoded by default, without this attempting # to write times, for example, would fail. 
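         # (cf_encoder is what converts, for example, datetime64 values into
         # numeric offsets with CF "units" and "calendar" attributes)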
- cf_variables, cf_attrs = cf_encoder(variables, attributes) - AbstractWritableDataStore.store(self, cf_variables, cf_attrs, - *args, **kwargs) + variables, attributes = cf_encoder(variables, attributes) + variables = OrderedDict([(k, self.encode_variable(v)) + for k, v in variables.items()]) + attributes = OrderedDict([(k, self.encode_attribute(v)) + for k, v in attributes.items()]) + return variables, attributes class DataStorePickleMixin(object): diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py index 1cd7b65f80b..ca9a46129ac 100644 --- a/xarray/backends/h5netcdf_.py +++ b/xarray/backends/h5netcdf_.py @@ -9,7 +9,7 @@ from ..core.pycompat import iteritems, bytes_type, unicode_type, OrderedDict from .common import WritableCFDataStore, DataStorePickleMixin, find_root -from .netCDF4_ import (_nc4_group, _nc4_values_and_dtype, +from .netCDF4_ import (_nc4_group, _encode_nc4_variable, _get_datatype, _extract_nc4_variable_encoding, BaseNetCDF4Array) @@ -127,14 +127,15 @@ def set_attribute(self, key, value): with self.ensure_open(autoclose=False): self.ds.setncattr(key, value) + def encode_variable(self, variable): + return _encode_nc4_variable(variable) + def prepare_variable(self, name, variable, check_encoding=False, unlimited_dims=None): import h5py attrs = variable.attrs.copy() - variable, dtype = _nc4_values_and_dtype(variable) - - self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims) + dtype = _get_datatype(variable) fill_value = attrs.pop('_FillValue', None) if dtype is str and fill_value is not None: diff --git a/xarray/backends/memory.py b/xarray/backends/memory.py index 4cecf1e7771..8c09277b2d0 100644 --- a/xarray/backends/memory.py +++ b/xarray/backends/memory.py @@ -30,6 +30,13 @@ def get_attrs(self): def get_variables(self): return self._variables + def get_dimensions(self): + dims = OrderedDict() + for v in self._variables.values(): + for d, s in v.dims.items(): + dims[d] = s + return dims + def prepare_variable(self, k, v, *args, **kwargs): new_var = Variable(v.dims, np.empty_like(v), v.attrs) # we copy the variable and stuff all encodings in the @@ -42,6 +49,6 @@ def set_attribute(self, k, v): # copy to imitate writing to disk. 
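         # (a deep copy, rather than plain assignment, ensures later in-place
         # changes to v by the caller cannot alter what was "written")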
self._attributes[k] = copy.deepcopy(v) - def set_dimension(self, d, l): + def set_dimension(self, d, l, unlimited_dims=None): # in this model, dimensions are accounted for in the variables pass diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py index f5185742eb3..9b0044fe438 100644 --- a/xarray/backends/netCDF4_.py +++ b/xarray/backends/netCDF4_.py @@ -74,19 +74,28 @@ def __getitem__(self, key): return data -def _nc4_values_and_dtype(var): +def _encode_nc4_variable(var): + if var.dtype.kind == 'S': + var = conventions.maybe_encode_as_char_array(var) + return var + + +def _get_datatype(var, nc_format='NETCDF4'): + if nc_format == 'NETCDF4': + datatype = _nc4_dtype(var) + else: + datatype = var.dtype + return datatype + + +def _nc4_dtype(var): if var.dtype.kind == 'U': dtype = str - elif var.dtype.kind == 'S': - # use character arrays instead of unicode, because unicode support in - # netCDF4 is still rather buggy - var = conventions.maybe_encode_as_char_array(var) - dtype = var.dtype - elif var.dtype.kind in ['i', 'u', 'f', 'c']: + elif var.dtype.kind in ['i', 'u', 'f', 'c', 'S']: dtype = var.dtype else: raise ValueError('cannot infer dtype for netCDF4 variable') - return var, dtype + return dtype def _nc4_group(ds, group, mode): @@ -325,18 +334,17 @@ def set_variables(self, *args, **kwargs): with self.ensure_open(autoclose=False): super(NetCDF4DataStore, self).set_variables(*args, **kwargs) - def prepare_variable(self, name, variable, check_encoding=False, - unlimited_dims=None): + def encode_variable(self, variable): variable = _force_native_endianness(variable) - if self.format == 'NETCDF4': - variable, datatype = _nc4_values_and_dtype(variable) + variable = _encode_nc4_variable(variable) else: variable = encode_nc3_variable(variable) - datatype = variable.dtype - - self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims) + return variable + def prepare_variable(self, name, variable, check_encoding=False, + unlimited_dims=None): + datatype = _get_datatype(variable, self.format) attrs = variable.attrs.copy() fill_value = attrs.pop('_FillValue', None) diff --git a/xarray/backends/scipy_.py b/xarray/backends/scipy_.py index 0994d8510b8..dba2e5672a2 100644 --- a/xarray/backends/scipy_.py +++ b/xarray/backends/scipy_.py @@ -181,17 +181,16 @@ def set_attribute(self, key, value): value = encode_nc3_attr_value(value) setattr(self.ds, key, value) + def encode_variable(self, variable): + variable = encode_nc3_variable(variable) + return variable + def prepare_variable(self, name, variable, check_encoding=False, unlimited_dims=None): - variable = encode_nc3_variable(variable) if check_encoding and variable.encoding: raise ValueError('unexpected encoding for scipy backend: %r' % list(variable.encoding)) - if unlimited_dims is not None and len(unlimited_dims) > 1: - raise ValueError('NETCDF3 only supports one unlimited dimension') - self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims) - data = variable.data # nb. 
this still creates a numpy array in all memory, even though we # don't write the data yet; scipy.io.netcdf does not not support diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index 53051218761..02753f6cca9 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -44,15 +44,6 @@ def _ensure_valid_fill_value(value, dtype): return _encode_zarr_attr_value(valid) -def _decode_zarr_attr_value(value): - return value - - -def _decode_zarr_attrs(attrs): - return OrderedDict([(k, _decode_zarr_attr_value(v)) - for k, v in attrs.items()]) - - def _replace_slices_with_arrays(key, shape): """Replace slice objects in vindex with equivalent ndarray objects.""" num_slices = sum(1 for k in key if isinstance(k, slice)) @@ -293,15 +284,6 @@ def __init__(self, zarr_group, writer=None): self._synchronizer = self.ds.synchronizer self._group = self.ds.path - if _DIMENSION_KEY not in self.ds.attrs: - if self._read_only: - raise KeyError("Zarr group can't be read by xarray because " - "it is missing the `%s` attribute." % - _DIMENSION_KEY) - else: - # initialize hidden dimension attribute - self.ds.attrs[_DIMENSION_KEY] = {} - if writer is None: # by default, we should not need a lock for writing zarr because # we do not (yet) allow overlapping chunks during write @@ -316,7 +298,7 @@ def open_store_variable(self, name, zarr_array): data = indexing.LazilyIndexedArray(ZarrArrayWrapper(name, self)) dimensions, attributes = _get_zarr_dims_and_attrs(zarr_array, _DIMENSION_KEY) - attributes = _decode_zarr_attrs(attributes) + attributes = OrderedDict(attributes) encoding = {'chunks': zarr_array.chunks, 'compressor': zarr_array.compressor, 'filters': zarr_array.filters} @@ -332,29 +314,40 @@ def get_variables(self): for k, v in self.ds.arrays()) def get_attrs(self): - _, attributes = _get_zarr_dims_and_attrs(self.ds, _DIMENSION_KEY) - return _decode_zarr_attrs(attributes) + attributes = OrderedDict(self.ds.attrs.asdict()) + return attributes def get_dimensions(self): - dimensions, _ = _get_zarr_dims_and_attrs(self.ds, _DIMENSION_KEY) + dimensions = OrderedDict() + for k, v in self.ds.arrays(): + try: + for d, s in zip(v.attrs[_DIMENSION_KEY], v.shape): + if d in dimensions and dimensions[d] != s: + raise ValueError( + 'found conflicting lengths for dimension %s ' + '(%d != %d)' % (d, s, dimensions[d])) + dimensions[d] = s + + except KeyError: + raise KeyError("Zarr object is missing the attribute `%s`, " + "which is required for xarray to determine " + "variable dimensions." 
% (_DIMENSION_KEY)) return dimensions - def set_dimension(self, name, length, is_unlimited=False): - if is_unlimited: + def set_dimensions(self, variables, unlimited_dims=None): + if unlimited_dims is not None: raise NotImplementedError( "Zarr backend doesn't know how to handle unlimited dimensions") - # consistency check - if name in self.ds.attrs[_DIMENSION_KEY]: - if self.ds.attrs[_DIMENSION_KEY][name] != length: - raise ValueError("Pre-existing array dimensions %r " - "encoded in Zarr attributes are incompatible " - "with newly specified dimension `%s`: %g" % - (self.ds.attrs[_DIMENSION_KEY], name, length)) - self.ds.attrs[_DIMENSION_KEY][name] = length - - def set_attribute(self, key, value): - _, attributes = _get_zarr_dims_and_attrs(self.ds, _DIMENSION_KEY) - attributes[key] = _encode_zarr_attr_value(value) + + def set_attributes(self, attributes): + self.ds.attrs.put(attributes) + + def encode_variable(self, variable): + variable = encode_zarr_variable(variable) + return variable + + def encode_attribute(self, a): + return _encode_zarr_attr_value(a) def prepare_variable(self, name, variable, check_encoding=False, unlimited_dims=None): @@ -364,72 +357,27 @@ def prepare_variable(self, name, variable, check_encoding=False, dtype = variable.dtype shape = variable.shape - # TODO: figure out how zarr should deal with unlimited dimensions - self.set_necessary_dimensions(variable, unlimited_dims=unlimited_dims) - fill_value = _ensure_valid_fill_value(attrs.pop('_FillValue', None), dtype) - # TODO: figure out what encoding is needed for zarr encoding = _extract_zarr_variable_encoding( variable, raise_on_invalid=check_encoding) - # arguments for zarr.create: - # zarr.creation.create(shape, chunks=None, dtype=None, - # compressor='default', fill_value=0, order='C', store=None, - # synchronizer=None, overwrite=False, path=None, chunk_store=None, - # filters=None, cache_metadata=True, **kwargs) - if name in self.ds: - zarr_array = self.ds[name] - else: - zarr_array = self.ds.create(name, shape=shape, dtype=dtype, - fill_value=fill_value, **encoding) - # decided not to explicity enumerate encoding options because we - # risk overriding zarr's defaults (e.g. if we specificy - # cache_metadata=None instead of True). Alternative is to have lots of - # logic in _extract_zarr_variable encoding to duplicate zarr defaults. - # chunks=encoding.get('chunks'), - # compressor=encoding.get('compressor'), - # filters=encodings.get('filters'), - # cache_metadata=encoding.get('cache_metadata')) - + encoded_attrs = OrderedDict() # the magic for storing the hidden dimension data - zarr_array.attrs[_DIMENSION_KEY] = dims - _, attributes = _get_zarr_dims_and_attrs(zarr_array, _DIMENSION_KEY) - + encoded_attrs[_DIMENSION_KEY] = dims for k, v in iteritems(attrs): - attributes[k] = _encode_zarr_attr_value(v) + encoded_attrs[k] = self.encode_attribute(v) + + zarr_array = self.ds.create(name, shape=shape, dtype=dtype, + fill_value=fill_value, **encoding) + zarr_array.attrs.put(encoded_attrs) return zarr_array, variable.data def store(self, variables, attributes, *args, **kwargs): - new_vars = OrderedDict((k, encode_zarr_variable(v, name=k)) - for k, v in iteritems(variables)) - AbstractWritableDataStore.store(self, new_vars, attributes, + AbstractWritableDataStore.store(self, variables, attributes, *args, **kwargs) - # sync() and close() methods should not be needed with zarr - - -# from zarr docs - -# Zarr arrays can be used as either the source or sink for data in parallel -# computations. 
Both multi-threaded and multi-process parallelism are -# supported. The Python global interpreter lock (GIL) is released for both -# compression and decompression operations, so Zarr will not block other Python -# threads from running. -# -# A Zarr array can be read concurrently by multiple threads or processes. No -# synchronization (i.e., locking) is required for concurrent reads. -# -# A Zarr array can also be written to concurrently by multiple threads or -# processes. Some synchronization may be required, depending on the way the -# data is being written. - -# If each worker in a parallel computation is writing to a separate region of -# the array, and if region boundaries are perfectly aligned with chunk -# boundaries, then no synchronization is required. However, if region and chunk -# boundaries are not perfectly aligned, then synchronization is required to -# avoid two workers attempting to modify the same chunk at the same time. def open_zarr(store, group=None, synchronizer=None, auto_chunk=True, diff --git a/xarray/tests/__init__.py b/xarray/tests/__init__.py index f373d201b03..dadcdeff640 100644 --- a/xarray/tests/__init__.py +++ b/xarray/tests/__init__.py @@ -72,7 +72,7 @@ def _importorskip(modname, minversion=None): has_bottleneck, requires_bottleneck = _importorskip('bottleneck') has_rasterio, requires_rasterio = _importorskip('rasterio') has_pathlib, requires_pathlib = _importorskip('pathlib') -has_zarr, requires_zarr = _importorskip('zarr', minversion='2.2.0') +has_zarr, requires_zarr = _importorskip('zarr', minversion='2.2') has_np112, requires_np112 = _importorskip('numpy', minversion='1.12.0') # some special cases diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py index a3256f84bdd..5cbc014f0a4 100644 --- a/xarray/tests/test_backends.py +++ b/xarray/tests/test_backends.py @@ -727,6 +727,16 @@ def test_append_overwrite_values(self): with self.open(tmp_file) as actual: assert_identical(data, actual) + def test_append_with_invalid_dim_raises(self): + data = create_test_data() + with create_tmp_file(allow_cleanup_failure=False) as tmp_file: + self.save(data, tmp_file, mode='w') + data['var9'] = data['var2'] * 3 + data = data.isel(dim1=slice(2, 6)) # modify one dimension + with raises_regex(ValueError, + 'Unable to update size for existing dimension'): + self.save(data, tmp_file, mode='a') + def test_vectorized_indexing(self): self._test_vectorized_indexing(vindex_support=False) @@ -1099,12 +1109,26 @@ def create_store(self): with self.create_zarr_target() as store_target: yield backends.ZarrStore.open_group(store_target, mode='w') + def save(self, dataset, store_target, **kwargs): + dataset.to_zarr(store=store_target, **kwargs) + + @contextlib.contextmanager + def open(self, store_target, **kwargs): + with xr.open_zarr(store_target, **kwargs) as ds: + yield ds + @contextlib.contextmanager def roundtrip(self, data, save_kwargs={}, open_kwargs={}, allow_cleanup_failure=False): with self.create_zarr_target() as store_target: - data.to_zarr(store=store_target, **save_kwargs) - yield xr.open_zarr(store_target, **open_kwargs) + self.save(data, store_target, **save_kwargs) + with self.open(store_target, **open_kwargs) as ds: + yield ds + + @contextlib.contextmanager + def roundtrip_append(self, data, save_kwargs={}, open_kwargs={}, + allow_cleanup_failure=False): + pytest.skip("zarr backend does not support appending") def test_auto_chunk(self): original = create_test_data().chunk() @@ -1195,33 +1219,22 @@ def test_hidden_zarr_keys(self): 
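        # writing through the data-store interface (rather than to_zarr)
        # exposes the underlying zarr group for direct inspection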
expected.dump_to_store(store) zarr_group = store.ds - # check that the global hidden attribute is present - assert self.DIMENSION_KEY in zarr_group.attrs - # check that a variable hidden attribute is present and correct # JSON only has a single array type, which maps to list in Python. # In contrast, dims in xarray is always a tuple. for var in expected.variables.keys(): - assert (zarr_group[var].attrs[self.DIMENSION_KEY] == - list(expected[var].dims)) + dims = zarr_group[var].attrs[self.DIMENSION_KEY] + assert dims == list(expected[var].dims) - with xr.decode_cf(store) as actual: + with xr.decode_cf(store): # make sure it is hidden - assert self.DIMENSION_KEY not in actual.attrs for var in expected.variables.keys(): assert self.DIMENSION_KEY not in expected[var].attrs - # verify that the dataset fails to open if dimension key is missing - del zarr_group.attrs[self.DIMENSION_KEY] - with pytest.raises(KeyError): - with xr.decode_cf(store) as actual: - pass - # put it back and try removing from a variable - zarr_group.attrs[self.DIMENSION_KEY] = {} del zarr_group.var2.attrs[self.DIMENSION_KEY] with pytest.raises(KeyError): - with xr.decode_cf(store) as actual: + with xr.decode_cf(store): pass def test_write_persistence_modes(self): @@ -1237,13 +1250,13 @@ def test_write_persistence_modes(self): # make sure overwriting works as expected with self.create_zarr_target() as store: - original.to_zarr(store) + self.save(original, store) # should overwrite with no error - original.to_zarr(store, mode='w') - actual = xr.open_zarr(store) - assert_identical(original, actual) - with pytest.raises(ValueError): - original.to_zarr(store, mode='w-') + self.save(original, store, mode='w') + with self.open(store) as actual: + assert_identical(original, actual) + with pytest.raises(ValueError): + self.save(original, store, mode='w-') # check that we can't use other persistence modes # TODO: reconsider whether other persistence modes should be supported @@ -1258,7 +1271,7 @@ def test_compressor_encoding(self): blosc_comp = zarr.Blosc(cname='zstd', clevel=3, shuffle=2) save_kwargs = dict(encoding={'var1': {'compressor': blosc_comp}}) with self.roundtrip(original, save_kwargs=save_kwargs) as actual: - assert actual.var1.encoding['compressor'] == blosc_comp + assert repr(actual.var1.encoding['compressor']) == repr(blosc_comp) def test_group(self): original = create_test_data() @@ -1266,10 +1279,6 @@ def test_group(self): with self.roundtrip(original, save_kwargs={'group': group}, open_kwargs={'group': group}) as actual: assert_identical(original, actual) - with pytest.raises(KeyError): - with self.roundtrip(original, - save_kwargs={'group': group}) as actual: - assert_identical(original, actual) # TODO: implement zarr object encoding and make these tests pass @pytest.mark.xfail(reason="Zarr object encoding not implemented") @@ -1295,6 +1304,18 @@ def test_roundtrip_string_encoded_characters(self): def test_dataset_caching(self): super(CFEncodedDataTest, self).test_dataset_caching() + @pytest.mark.xfail(reason="Zarr stores can not be appended to") + def test_append_write(self): + super(CFEncodedDataTest, self).test_append_write() + + @pytest.mark.xfail(reason="Zarr stores can not be appended to") + def test_append_overwrite_values(self): + super(CFEncodedDataTest, self).test_append_overwrite_values() + + @pytest.mark.xfail(reason="Zarr stores can not be appended to") + def test_append_with_invalid_dim_raises(self): + super(CFEncodedDataTest, self).test_append_with_invalid_dim_raises() + @requires_zarr class 
ZarrDictStoreTest(BaseZarrTest, TestCase):
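
For context, here is a minimal sketch of the round trip these tests exercise,
using a plain dict as the underlying store (the same idea as the
ZarrDictStoreTest above). The toy dataset is made up for illustration;
to_zarr/open_zarr and the mode values are the interfaces exercised in this
diff, and zarr >= 2.2 is assumed, matching the _importorskip change:

    import xarray as xr

    # hypothetical example data
    ds = xr.Dataset({'foo': ('x', [1.0, 2.0, 3.0])},
                    coords={'x': [10, 20, 30]})

    store = {}  # any MutableMapping can back a zarr group
    ds.to_zarr(store=store, mode='w')  # 'w' overwrites; 'w-' raises if the
                                       # store already contains data
    actual = xr.open_zarr(store)       # opens lazily, dask-chunked by default
    assert ds.identical(actual)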