@@ -32,86 +32,91 @@ def __repr__(self):
3232 """
3333 return super ().__repr__ ().replace ("<" , "<CF " , 1 )
3434
35- def to_dask_array (self , chunks = "auto" ):
36- """Convert the data to a `dask` array.
37-
38- .. versionadded:: 3.14.0
39-
40- :Parameters:
41-
42- chunks: `int`, `tuple`, `dict` or `str`, optional
43- Specify the chunking of the returned dask array.
44-
45- Any value accepted by the *chunks* parameter of the
46- `dask.array.from_array` function is allowed.
47-
48- The chunk sizes implied by *chunks* for a dimension that
49- has been fragmented are ignored and replaced with values
50- that are implied by that dimensions fragment sizes.
51-
52- :Returns:
53-
54- `dask.array.Array`
55- The `dask` array representation.
56-
57- """
58- from functools import partial
59-
60- import dask .array as da
61- from dask import config
62- from dask .array .core import getter , normalize_chunks
63- from dask .base import tokenize
64-
65- name = (f"{ self .__class__ .__name__ } -{ tokenize (self )} " ,)
66-
67- dtype = self .dtype
68-
69- context = partial (config .set , scheduler = "synchronous" )
70-
71- compressed_dimensions = self .compressed_dimensions ()
72- conformed_data = self .conformed_data ()
73- compressed_data = conformed_data ["data" ]
74- uncompressed_indices = conformed_data ["uncompressed_indices" ]
75-
76- # If possible, convert the compressed data to a dask array
77- # that doesn't support concurrent reads. This prevents
78- # "compute called by compute" failures problems at compute
79- # time.
80- #
81- # TODO: This won't be necessary if this is refactored so that
82- # the compressed data is part of the same dask graph as
83- # the compressed subarrays.
84- compressed_data = self ._lock_file_read (compressed_data )
85-
86- # Get the (cfdm) subarray class
87- Subarray = self .get_Subarray ()
88- subarray_name = Subarray ().__class__ .__name__
89-
90- # Set the chunk sizes for the dask array
91- chunks = self .subarray_shapes (chunks )
92- chunks = normalize_chunks (
93- self .subarray_shapes (chunks ),
94- shape = self .shape ,
95- dtype = dtype ,
96- )
97-
98- dsk = {}
99- for u_indices , u_shape , c_indices , chunk_location in zip (
100- * self .subarrays (chunks )
101- ):
102- subarray = Subarray (
103- data = compressed_data ,
104- indices = c_indices ,
105- shape = u_shape ,
106- compressed_dimensions = compressed_dimensions ,
107- uncompressed_indices = uncompressed_indices ,
108- context_manager = context ,
109- )
110-
111- key = f"{ subarray_name } -{ tokenize (subarray )} "
112- dsk [key ] = subarray
113-
114- dsk [name + chunk_location ] = (getter , key , Ellipsis , False , False )
115-
116- # Return the dask array
117- return da .Array (dsk , name [0 ], chunks = chunks , dtype = dtype )
35+ def subarray_parameters (self ):
36+ """TODOUGRID"""
37+ return {}
38+
39+
40+ # def to_dask_array(self, chunks="auto"):
41+ # """Convert the data to a `dask` array.
42+ #
43+ # .. versionadded:: 3.14.0
44+ #
45+ # :Parameters:
46+ #
47+ # chunks: `int`, `tuple`, `dict` or `str`, optional
48+ # Specify the chunking of the returned dask array.
49+ #
50+ # Any value accepted by the *chunks* parameter of the
51+ # `dask.array.from_array` function is allowed.
52+ #
53+ # The chunk sizes implied by *chunks* for a dimension that
54+ # has been fragmented are ignored and replaced with values
55+ # that are implied by that dimensions fragment sizes.
56+ #
57+ # :Returns:
58+ #
59+ # `dask.array.Array`
60+ # The `dask` array representation.
61+ #
62+ # """
63+ # from functools import partial
64+ #
65+ # import dask.array as da
66+ # from dask import config
67+ # from dask.array.core import getter, normalize_chunks
68+ # from dask.base import tokenize
69+ #
70+ # name = (f"{self.__class__.__name__}-{tokenize(self)}",)
71+ #
72+ # dtype = self.dtype
73+ #
74+ # context = partial(config.set, scheduler="synchronous")
75+ #
76+ # compressed_dimensions = self.compressed_dimensions()
77+ # conformed_data = self.conformed_data()
78+ # compressed_data = conformed_data["data"]
79+ # uncompressed_indices = conformed_data["uncompressed_indices"]
80+ #
81+ # # If possible, convert the compressed data to a dask array
82+ # # that doesn't support concurrent reads. This prevents
83+ # # "compute called by compute" failures problems at compute
84+ # # time.
85+ # #
86+ # # TODO: This won't be necessary if this is refactored so that
87+ # # the compressed data is part of the same dask graph as
88+ # # the compressed subarrays.
89+ # compressed_data = self._lock_file_read(compressed_data)
90+ #
91+ # # Get the (cfdm) subarray class
92+ # Subarray = self.get_Subarray()
93+ # subarray_name = Subarray().__class__.__name__
94+ #
95+ # # Set the chunk sizes for the dask array
96+ # # chunks = self.subarray_shapes(chunks)
97+ # chunks = normalize_chunks(
98+ # self.subarray_shapes(chunks),
99+ # shape=self.shape,
100+ # dtype=dtype,
101+ # )
102+ #
103+ # dsk = {}
104+ # for u_indices, u_shape, c_indices, chunk_location in zip(
105+ # *self.subarrays(chunks)
106+ # ):
107+ # subarray = Subarray(
108+ # data=compressed_data,
109+ # indices=c_indices,
110+ # shape=u_shape,
111+ # compressed_dimensions=compressed_dimensions,
112+ # uncompressed_indices=uncompressed_indices,
113+ # context_manager=context,
114+ # )
115+ #
116+ # key = f"{subarray_name}-{tokenize(subarray)}"
117+ # dsk[key] = subarray
118+ #
119+ # dsk[name + chunk_location] = (getter, key, Ellipsis, False, False)
120+ #
121+ # # Return the dask array
122+ # return da.Array(dsk, name[0], chunks=chunks, dtype=dtype)