def gdal_create_dataset(drv, name, cols=0, rows=0, bands=0,
                        gdal_type=gdal.GDT_Unknown, remove=False):
    """Creates GDAL.DataSet object.

    .. versionadded:: 0.7.0

    .. versionchanged:: 0.11.0
       - changed parameters to keyword args
       - added 'bands' as parameter

    Parameters
    ----------
    drv : string
        GDAL driver string
    name : string
        path to filename
    cols : int
        # of columns
    rows : int
        # of rows
    bands : int
        # of raster bands
    gdal_type : raster data type
        eg. gdal.GDT_Float32
    remove : bool
        if True, existing gdal.Dataset will be
        removed before creation

    Returns
    -------
    out : gdal.Dataset
        object

    Raises
    ------
    IOError
        if the driver does not support the Create() method
    """
    driver = gdal.GetDriverByName(drv)
    metadata = driver.GetMetadata()
    # GDAL advertises Create() support with the metadata value 'YES'.
    # BUG FIX: the original truth-tested the raw metadata value, which is
    # truthy for any non-empty string (including 'NO'); compare to 'YES'.
    if metadata.get('DCAP_CREATE', 'NO') != 'YES':
        # BUG FIX: the original mixed a %-placeholder with str.format(),
        # leaving a literal '%s' in the message; use a format field.
        raise IOError("Driver {} doesn't support Create() method.".format(drv))
    if remove and os.path.exists(name):
        driver.Delete(name)
    ds = driver.Create(name, cols, rows, bands, gdal_type)
    return ds
# Python Dataset() class usage examples
def write_raster_dataset(fpath, dataset, format, options=None, remove=False):
    """Write raster dataset to file format.

    .. versionadded 0.10.0

    Parameters
    ----------
    fpath : string
        A file path - should have file extension corresponding to format.
    dataset : gdal.Dataset
        gdal raster dataset
    format : string
        gdal raster format string
    options : list
        List of option strings for the corresponding format.
    remove : bool
        if True, existing gdal.Dataset will be
        removed before creation

    Raises
    ------
    IOError
        if the driver does not support the CreateCopy() method

    Note
    ----
    For format and options refer to
    `formats_list <http://www.gdal.org/formats_list.html>`_.

    Examples
    --------
    See :ref:`notebooks/fileio/wradlib_gis_export_example.ipynb`.
    """
    # check for option list
    if options is None:
        options = []
    driver = gdal.GetDriverByName(format)
    metadata = driver.GetMetadata()
    # check driver capability: GDAL advertises CreateCopy() support with the
    # metadata value 'YES'.
    # BUG FIX: the original used `assert "<message>"`, which is always truthy
    # and can never fail, and it skipped drivers missing the key entirely;
    # raise IOError like gdal_create_dataset() does.
    if metadata.get('DCAP_CREATECOPY', 'NO') != 'YES':
        raise IOError(
            "Driver {} doesn't support CreateCopy() method.".format(format))
    if remove and os.path.exists(fpath):
        driver.Delete(fpath)
    target = driver.CreateCopy(fpath, dataset, 0, options)
    # Releasing the returned dataset flushes pending writes to disk.
    del target
def hall_rectification(reference, subject, out_path, ref_set, sub_set, dd=False, nodata=-9999,
                       dtype=np.int32, keys=('High/Bright', 'Low/Dark')):
    '''
    Performs radiometric rectification after Hall et al. (1991) in Remote
    Sensing of Environment. Fits a per-band linear transformation mapping the
    subject image's radiometric control sets onto the reference image's, then
    applies it to every pixel of the subject image. Assumes first raster is
    the reference image and that none of the targets are NoData pixels in the
    reference image (they are filtered out in the subject images). Arguments:
        reference   The reference image, a gdal.Dataset
        subject     The subject image, a gdal.Dataset
        out_path    Path to a directory where the rectified image should be stored
        ref_set     Mapping (e.g., dict) indexed by `keys`: the "bright"
                    radiometric control set and the "dark" radiometric control
                    set for the reference image, each a sequence of (x, y)
                    coordinate pairs
        sub_set     As with ref_set, a mapping indexed by `keys` for the
                    subject image
        dd          Coordinates are in decimal degrees?
        nodata      The NoData value to use for all the rasters
        dtype       Data type (NumPy dtype) for the output array; default is
                    32-bit Int
        keys        The names of the mapping keys for the bright, dark sets,
                    respectively
    '''
    # Unpack bright, dark control sets for subject image
    bright_targets, dark_targets = (sub_set[keys[0]], sub_set[keys[1]])
    # Calculate the mean reflectance in each band for bright, dark targets
    bright_ref = spectra_at_xy(reference, ref_set[keys[0]], dd=dd).mean(axis=0)
    dark_ref = spectra_at_xy(reference, ref_set[keys[1]], dd=dd).mean(axis=0)
    # Calculate transformation for the target image
    brights = spectra_at_xy(subject, bright_targets, dd=dd) # Prepare to filter NoData pixels
    darks = spectra_at_xy(subject, dark_targets, dd=dd)
    # Get the "subject" image means for each radiometric control set,
    # dropping rows assumed to be all NoData (a row summing to
    # nodata * n_bands is treated as a NoData pixel)
    mean_bright = brights[
        np.sum(brights, axis=1) != (nodata * brights.shape[1])
    ].mean(axis=0)
    mean_dark = darks[
        np.sum(darks, axis=1) != (nodata * darks.shape[1])
    ].mean(axis=0)
    # Calculate the coefficients of the linear transformation:
    # per-band gain m and offset b such that reference ~= m * subject + b
    m = (bright_ref - dark_ref) / (mean_bright - mean_dark)
    b = (dark_ref * mean_bright - mean_dark * bright_ref) / (mean_bright - mean_dark)
    arr = subject.ReadAsArray()
    shp = arr.shape # Remember the original shape
    mask = arr.copy() # Save the NoData value locations
    # Flatten the spatial dimensions so each band is one row, apply the
    # per-band linear transform, then restore the original shape
    m = m.reshape((shp[0], 1))
    b = b.reshape((shp[0], 1)).T.repeat(shp[1] * shp[2], axis=0).T
    arr2 = ((arr.reshape((shp[0], shp[1] * shp[2])) * m) + b).reshape(shp)
    arr2[mask == nodata] = nodata # Re-apply NoData values
    # Dump the raster to a file named after the subject image
    out_path = os.path.join(out_path, 'rect_%s' % os.path.basename(subject.GetDescription()))
    dump_raster(
        array_to_raster(arr2, subject.GetGeoTransform(), subject.GetProjection(), dtype=dtype), out_path)
def binary_mask(rast, mask, nodata=-9999, invert=False):
    '''
    Applies an arbitrary, binary mask (data in [0,1]) where pixels with
    a value of 1 are pixels to be masked out (unless invert is True).
    Arguments:
        rast    A gdal.Dataset or a NumPy array
        mask    A gdal.Dataset or a NumPy array
        nodata  The NoData value; defaults to -9999.
        invert  Invert the mask? (transpose meaning of 0 and 1); defaults to False.
    Returns a NumPy array with masked pixels set to the NoData value.
    Raises ValueError if the raster and mask spatial dimensions differ.
    '''
    # Can accept either a gdal.Dataset or numpy.array instance
    if not isinstance(rast, np.ndarray):
        rastr = rast.ReadAsArray()
    else:
        rastr = rast.copy()
    if not isinstance(mask, np.ndarray):
        maskr = mask.ReadAsArray()
    else:
        maskr = mask.copy()
    # Only the spatial (rows, cols) dimensions must agree; the mask may have
    # fewer bands than the raster.
    # FIX: np.alltrue was removed in NumPy 2.0; np.all is the supported name.
    if not np.all(np.equal(rastr.shape[-2:], maskr.shape[-2:])):
        raise ValueError('Raster and mask do not have the same shape')
    # Convert Boolean arrays to ones and zeros
    # FIX: np.int0 was removed in NumPy 2.0; any integer dtype serves here.
    if maskr.dtype == bool:
        maskr = maskr.astype(np.uint8)
    # Transform into a "1-band" array and apply the mask
    if maskr.shape != rastr.shape:
        maskr = maskr.reshape((1, maskr.shape[-2], maskr.shape[-1]))\
            .repeat(rastr.shape[0], axis=0)  # Copy the mask across the "bands"
    # TODO Compare to place(), e.g.,
    # np.place(rastr, mask.repeat(rastr.shape[0], axis=0), (nodata,))
    # Mask out areas that match the mask (==1)
    if invert:
        rastr[maskr < 1] = nodata
    else:
        rastr[maskr > 0] = nodata
    return rastr
def mask_ledaps_qa(rast, mask, nodata=-9999):
    '''
    Applies a given LEDAPS QA mask to a raster. It's unclear how these
    bit-packed QA values ought to be converted back into 16-bit binary numbers:

        "{0:b}".format(42).zfill(16) # Convert binary to decimal padded left?
        "{0:b}".format(42).ljust(16, '0') # Or convert ... padded right?

    The temporary solution is to use the most common (modal) value as the
    "clear" pixel classification and discard everything else. We'd like to
    just discard pixels above a certain value knowing that everything above
    this threshold has a certain bit-packed QA meaning. For example, mask
    pixels with QA values greater than or equal to 12287:

        int("1000000000000000", 2) == 32768 # Maybe clouds
        int("0010000000000000", 2) == 12287 # Maybe cirrus

    Similarly, we'd like to discard pixels at or below 4, as these small binary
    numbers correspond to dropped frames, designated fill values, and/or
    terrain occlusion. Arguments:
        rast    A gdal.Dataset or a NumPy array
        mask    A gdal.Dataset or a NumPy array
        nodata  The NoData value; defaults to -9999.
    '''
    # Can accept either a gdal.Dataset or numpy.array instance
    # BUG FIX: the original bound the gdal.Dataset branch to `rast` instead of
    # `rastr`, leaving `rastr` undefined (NameError) later in the function.
    if not isinstance(rast, np.ndarray):
        rastr = rast.ReadAsArray()
    else:
        rastr = rast.copy()
    if not isinstance(mask, np.ndarray):
        maskr = mask.ReadAsArray()
    else:
        maskr = mask.copy()
    # BUG FIX: NaN replacement must run before np.bincount() (which requires
    # integers), and np.isnan() raises TypeError on integer arrays; so only
    # handle NaN for floating-point masks, then cast for bincount.
    if maskr.dtype.kind == 'f':
        maskr[np.isnan(maskr)] = 0
        maskr = maskr.astype(np.int64)
    # Since the QA output is so unreliable (e.g., clouds are called water),
    # we take the most common QA bit-packed value and assume it refers to
    # the "okay" pixels
    mode = np.argmax(np.bincount(maskr.ravel()))
    assert mode > 4 and mode < 12287, "The modal value corresponds to a known error value"
    # Binarize: 1 where the modal ("clear") value occurs, 0 elsewhere.
    # The order is safe because the assert guarantees mode > 4, so 0 can
    # never collide with the modal value.
    maskr[maskr != mode] = 0
    maskr[maskr == mode] = 1
    # Transform into a "1-band" array and apply the mask
    maskr = maskr.reshape((1, maskr.shape[0], maskr.shape[1]))\
        .repeat(rastr.shape[0], axis=0)  # Copy the mask across the "bands"
    rastr[maskr == 0] = nodata
    return rastr