Skip to content

Commit 82efa8b

Browse files
committed
dev
1 parent 229e383 commit 82efa8b

21 files changed

+804
-428
lines changed

cf/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -264,6 +264,7 @@
264264
FullArray,
265265
GatheredArray,
266266
NetCDFArray,
267+
PointTopologyArray,
267268
RaggedContiguousArray,
268269
RaggedIndexedArray,
269270
RaggedIndexedContiguousArray,

cf/cfimplementation.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,7 @@
3232
CFANetCDFArray,
3333
GatheredArray,
3434
NetCDFArray,
35+
PointTopologyArray,
3536
RaggedContiguousArray,
3637
RaggedIndexedArray,
3738
RaggedIndexedContiguousArray,
@@ -175,6 +176,7 @@ def initialise_CFANetCDFArray(
175176
CellConnectivityArray=CellConnectivityArray,
176177
GatheredArray=GatheredArray,
177178
NetCDFArray=NetCDFArray,
179+
PointTopologyArray=PointTopologyArray,
178180
RaggedContiguousArray=RaggedContiguousArray,
179181
RaggedIndexedArray=RaggedIndexedArray,
180182
RaggedIndexedContiguousArray=RaggedIndexedContiguousArray,
@@ -229,6 +231,7 @@ def implementation():
229231
'Data': cf.data.data.Data,
230232
'GatheredArray': cf.data.array.gatheredarray.GatheredArray,
231233
'NetCDFArray': cf.data.array.netcdfarray.NetCDFArray,
234+
'PointTopologyArray': cf.data.array.pointtopologyarray.PointTopologyArray,
232235
'RaggedContiguousArray': cf.data.array.raggedcontiguousarray.RaggedContiguousArray,
233236
'RaggedIndexedArray': cf.data.array.raggedindexedarray.RaggedIndexedArray,
234237
'RaggedIndexedContiguousArray': cf.data.array.raggedindexedcontiguousarray.RaggedIndexedContiguousArray,

cf/data/array/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
from .fullarray import FullArray
55
from .gatheredarray import GatheredArray
66
from .netcdfarray import NetCDFArray
7+
from .pointtopologyarray import PointTopologyArray
78
from .raggedcontiguousarray import RaggedContiguousArray
89
from .raggedindexedarray import RaggedIndexedArray
910
from .raggedindexedcontiguousarray import RaggedIndexedContiguousArray

cf/data/array/boundsfromnodesarray.py

Lines changed: 8 additions & 97 deletions
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,15 @@ class BoundsFromNodesArray(
1111
Container,
1212
cfdm.BoundsFromNodesArray,
1313
):
14-
"""An underlying gathered array.
14+
"""An array of cell bounds defined by UGRID node coordinates.
1515
16-
TODOUGRID
16+
The UGRID node coordinates contain the locations of the nodes of
17+
the domain topology. In UGRID, the bounds of edge, face and volume
18+
cells may be defined by these locations in conjunction with a
19+
mapping from each cell boundary vertex to its corresponding
20+
coordinate value.
1721
18-
Compression by gathering combines axes of a multidimensional array
19-
into a new, discrete axis whilst omitting the missing values and
20-
thus reducing the number of values that need to be stored.
21-
22-
The information needed to uncompress the data is stored in a "list
23-
variable" that gives the indices of the required points.
24-
25-
See CF section 8.2. "Lossless Compression by Gathering".
26-
27-
.. versionadded:: TODOUGRIDVER
22+
.. versionadded:: UGRIDVER
2823
2924
"""
3025

@@ -33,91 +28,7 @@ def __repr__(self):
3328
3429
x.__repr__() <==> repr(x)
3530
36-
.. versionadded:: TODOUGRIDVER
31+
.. versionadded:: UGRIDVER
3732
3833
"""
3934
return super().__repr__().replace("<", "<CF ", 1)
40-
41-
42-
# def to_dask_array(self, chunks="auto"):
43-
# """Convert the data to a `dask` array.
44-
#
45-
# .. versionadded:: TODOUGRIDVER
46-
#
47-
# :Parameters:
48-
#
49-
# chunks: `int`, `tuple`, `dict` or `str`, optional
50-
# Specify the chunking of the returned dask array.
51-
#
52-
# Any value accepted by the *chunks* parameter of the
53-
# `dask.array.from_array` function is allowed.
54-
#
55-
# The chunk sizes implied by *chunks* for a dimension that
56-
# has been fragmented are ignored and replaced with values
57-
# that are implied by that dimensions fragment sizes.
58-
#
59-
# :Returns:
60-
#
61-
# `dask.array.Array`
62-
# The `dask` array representation.
63-
#
64-
# """
65-
# from functools import partial
66-
#
67-
# import dask.array as da
68-
# from dask import config
69-
# from dask.array.core import getter, normalize_chunks
70-
# from dask.base import tokenize
71-
#
72-
# name = (f"{self.__class__.__name__}-{tokenize(self)}",)
73-
#
74-
# dtype = self.dtype
75-
#
76-
# context = partial(config.set, scheduler="synchronous")
77-
#
78-
# compressed_dimensions = self.compressed_dimensions()
79-
# conformed_data = self.conformed_data()
80-
#
81-
# # If possible, convert the compressed data to a dask array
82-
# # that doesn't support concurrent reads. This prevents
83-
# # "compute called by compute" failures problems at compute
84-
# # time.
85-
# #
86-
# # TODO: This won't be necessary if this is refactored so that
87-
# # the compressed data is part of the same dask graph as
88-
# # the compressed subarrays.
89-
# conformed_data = {
90-
# k: self._lock_file_read(v) for k, v in conformed_data.items()
91-
# }
92-
#
93-
# # Get the (cfdm) subarray class
94-
# Subarray = self.get_Subarray()
95-
# subarray_name = Subarray().__class__.__name__
96-
#
97-
# # Set the chunk sizes for the dask array
98-
# chunks = self.subarray_shapes(chunks)
99-
# chunks = normalize_chunks(
100-
# self.subarray_shapes(chunks),
101-
# shape=self.shape,
102-
# dtype=dtype,
103-
# )
104-
#
105-
# dsk = {}
106-
# for u_indices, u_shape, c_indices, chunk_location in zip(
107-
# *self.subarrays(chunks)
108-
# ):
109-
# subarray = Subarray(
110-
# indices=c_indices,
111-
# shape=u_shape,
112-
# compressed_dimensions=compressed_dimensions,
113-
# context_manager=context,
114-
# **conformed_data,
115-
# )
116-
#
117-
# key = f"{subarray_name}-{tokenize(subarray)}"
118-
# dsk[key] = subarray
119-
#
120-
# dsk[name + chunk_location] = (getter, key, Ellipsis, False, False)
121-
#
122-
# # Return the dask array
123-
# return da.Array(dsk, name[0], chunks=chunks, dtype=dtype)

cf/data/array/cellconnectivityarray.py

Lines changed: 9 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -11,20 +11,17 @@ class CellConnectivityArray(
1111
Container,
1212
cfdm.CellConnectivityArray,
1313
):
14-
"""An underlying gathered array.
14+
"""A connectivity array derived from a UGRID connectivity variable.
1515
16-
TODOUGRID
16+
A UGRID connectivity variable contains indices which map each cell
17+
to its neighbours, as found in a UGRID "edge_edge_connectivity",
18+
"face_face_connectivity", or "volume_volume_connectivity" variable.
1719
18-
Compression by gathering combines axes of a multidimensional array
19-
into a new, discrete axis whilst omitting the missing values and
20-
thus reducing the number of values that need to be stored.
20+
The connectivity array has one more column than the corresponding
21+
UGRID variable. The extra column, in the first position, contains
22+
the identifier for each cell.
2123
22-
The information needed to uncompress the data is stored in a "list
23-
variable" that gives the indices of the required points.
24-
25-
See CF section 8.2. "Lossless Compression by Gathering".
26-
27-
.. versionadded:: TODOUGRIDVER
24+
.. versionadded:: UGRIDVER
2825
2926
"""
3027

@@ -33,7 +30,7 @@ def __repr__(self):
3330
3431
x.__repr__() <==> repr(x)
3532
36-
.. versionadded:: TODOUGRIDVER
33+
.. versionadded:: UGRIDVER
3734
3835
"""
3936
return super().__repr__().replace("<", "<CF ", 1)

cf/data/array/gatheredarray.py

Lines changed: 88 additions & 83 deletions
Original file line numberDiff line numberDiff line change
@@ -32,86 +32,91 @@ def __repr__(self):
3232
"""
3333
return super().__repr__().replace("<", "<CF ", 1)
3434

35-
def to_dask_array(self, chunks="auto"):
36-
"""Convert the data to a `dask` array.
37-
38-
.. versionadded:: 3.14.0
39-
40-
:Parameters:
41-
42-
chunks: `int`, `tuple`, `dict` or `str`, optional
43-
Specify the chunking of the returned dask array.
44-
45-
Any value accepted by the *chunks* parameter of the
46-
`dask.array.from_array` function is allowed.
47-
48-
The chunk sizes implied by *chunks* for a dimension that
49-
has been fragmented are ignored and replaced with values
50-
that are implied by that dimensions fragment sizes.
51-
52-
:Returns:
53-
54-
`dask.array.Array`
55-
The `dask` array representation.
56-
57-
"""
58-
from functools import partial
59-
60-
import dask.array as da
61-
from dask import config
62-
from dask.array.core import getter, normalize_chunks
63-
from dask.base import tokenize
64-
65-
name = (f"{self.__class__.__name__}-{tokenize(self)}",)
66-
67-
dtype = self.dtype
68-
69-
context = partial(config.set, scheduler="synchronous")
70-
71-
compressed_dimensions = self.compressed_dimensions()
72-
conformed_data = self.conformed_data()
73-
compressed_data = conformed_data["data"]
74-
uncompressed_indices = conformed_data["uncompressed_indices"]
75-
76-
# If possible, convert the compressed data to a dask array
77-
# that doesn't support concurrent reads. This prevents
78-
# "compute called by compute" failures problems at compute
79-
# time.
80-
#
81-
# TODO: This won't be necessary if this is refactored so that
82-
# the compressed data is part of the same dask graph as
83-
# the compressed subarrays.
84-
compressed_data = self._lock_file_read(compressed_data)
85-
86-
# Get the (cfdm) subarray class
87-
Subarray = self.get_Subarray()
88-
subarray_name = Subarray().__class__.__name__
89-
90-
# Set the chunk sizes for the dask array
91-
chunks = self.subarray_shapes(chunks)
92-
chunks = normalize_chunks(
93-
self.subarray_shapes(chunks),
94-
shape=self.shape,
95-
dtype=dtype,
96-
)
97-
98-
dsk = {}
99-
for u_indices, u_shape, c_indices, chunk_location in zip(
100-
*self.subarrays(chunks)
101-
):
102-
subarray = Subarray(
103-
data=compressed_data,
104-
indices=c_indices,
105-
shape=u_shape,
106-
compressed_dimensions=compressed_dimensions,
107-
uncompressed_indices=uncompressed_indices,
108-
context_manager=context,
109-
)
110-
111-
key = f"{subarray_name}-{tokenize(subarray)}"
112-
dsk[key] = subarray
113-
114-
dsk[name + chunk_location] = (getter, key, Ellipsis, False, False)
115-
116-
# Return the dask array
117-
return da.Array(dsk, name[0], chunks=chunks, dtype=dtype)
35+
def subarray_parameters(self):
36+
"""TODOUGRID"""
37+
return {}
38+
39+
40+
# def to_dask_array(self, chunks="auto"):
41+
# """Convert the data to a `dask` array.
42+
#
43+
# .. versionadded:: 3.14.0
44+
#
45+
# :Parameters:
46+
#
47+
# chunks: `int`, `tuple`, `dict` or `str`, optional
48+
# Specify the chunking of the returned dask array.
49+
#
50+
# Any value accepted by the *chunks* parameter of the
51+
# `dask.array.from_array` function is allowed.
52+
#
53+
# The chunk sizes implied by *chunks* for a dimension that
54+
# has been fragmented are ignored and replaced with values
55+
# that are implied by that dimensions fragment sizes.
56+
#
57+
# :Returns:
58+
#
59+
# `dask.array.Array`
60+
# The `dask` array representation.
61+
#
62+
# """
63+
# from functools import partial
64+
#
65+
# import dask.array as da
66+
# from dask import config
67+
# from dask.array.core import getter, normalize_chunks
68+
# from dask.base import tokenize
69+
#
70+
# name = (f"{self.__class__.__name__}-{tokenize(self)}",)
71+
#
72+
# dtype = self.dtype
73+
#
74+
# context = partial(config.set, scheduler="synchronous")
75+
#
76+
# compressed_dimensions = self.compressed_dimensions()
77+
# conformed_data = self.conformed_data()
78+
# compressed_data = conformed_data["data"]
79+
# uncompressed_indices = conformed_data["uncompressed_indices"]
80+
#
81+
# # If possible, convert the compressed data to a dask array
82+
# # that doesn't support concurrent reads. This prevents
83+
# # "compute called by compute" failures problems at compute
84+
# # time.
85+
# #
86+
# # TODO: This won't be necessary if this is refactored so that
87+
# # the compressed data is part of the same dask graph as
88+
# # the compressed subarrays.
89+
# compressed_data = self._lock_file_read(compressed_data)
90+
#
91+
# # Get the (cfdm) subarray class
92+
# Subarray = self.get_Subarray()
93+
# subarray_name = Subarray().__class__.__name__
94+
#
95+
# # Set the chunk sizes for the dask array
96+
# # chunks = self.subarray_shapes(chunks)
97+
# chunks = normalize_chunks(
98+
# self.subarray_shapes(chunks),
99+
# shape=self.shape,
100+
# dtype=dtype,
101+
# )
102+
#
103+
# dsk = {}
104+
# for u_indices, u_shape, c_indices, chunk_location in zip(
105+
# *self.subarrays(chunks)
106+
# ):
107+
# subarray = Subarray(
108+
# data=compressed_data,
109+
# indices=c_indices,
110+
# shape=u_shape,
111+
# compressed_dimensions=compressed_dimensions,
112+
# uncompressed_indices=uncompressed_indices,
113+
# context_manager=context,
114+
# )
115+
#
116+
# key = f"{subarray_name}-{tokenize(subarray)}"
117+
# dsk[key] = subarray
118+
#
119+
# dsk[name + chunk_location] = (getter, key, Ellipsis, False, False)
120+
#
121+
# # Return the dask array
122+
# return da.Array(dsk, name[0], chunks=chunks, dtype=dtype)

cf/data/array/mixin/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,4 +2,5 @@
22
from .compressedarraymixin import CompressedArrayMixin
33
from .filearraymixin import FileArrayMixin
44
from .mesharraymixin import MeshArrayMixin
5-
from .raggedarraymixin import RaggedArrayMixin
5+
6+
# from .raggedarraymixin import RaggedArrayMixin

0 commit comments

Comments
 (0)