Apply `processor` or `self.processor` to `self`.
def process(self, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `self`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: p.process(self)
... |
Apply `processor` or `self.processor` to `item`.
def process_one(self, item:ItemBase, processor:PreProcessors=None):
"Apply `processor` or `self.processor` to `item`."
if processor is not None: self.processor = processor
self.processor = listify(self.processor)
for p in self.processor: ... |
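Both methods share one dispatch: an explicit `processor` overrides `self.processor`, which is then normalized to a list and applied in order — `process` over the whole list at setup time, `process_one` on a single item at inference. A minimal standalone sketch of that pattern (`ToLower` and `MiniItemList` are hypothetical stand-ins, not fastai classes):

```python
def listify(p):
    "None -> [], single object -> [p], a list stays a list."
    if p is None: return []
    return p if isinstance(p, list) else [p]

class ToLower:
    "Toy processor: lower-cases string items."
    def process(self, ds): ds.items = [self.process_one(o) for o in ds.items]
    def process_one(self, item): return item.lower()

class MiniItemList:
    def __init__(self, items, processor=None): self.items,self.processor = items,processor
    def process(self, processor=None):
        if processor is not None: self.processor = processor
        self.processor = listify(self.processor)
        for p in self.processor: p.process(self)
        return self
    def process_one(self, item, processor=None):
        if processor is not None: self.processor = processor
        for p in listify(self.processor): item = p.process_one(item)
        return item

il = MiniItemList(['A','B'], processor=ToLower()).process()
assert il.items == ['a','b'] and il.process_one('C') == 'c'
```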
Reconstruct one of the underlying items from its data `t`.
def reconstruct(self, t:Tensor, x:Tensor=None):
"Reconstruct one of the underlying items from its data `t`."
return self[0].reconstruct(t,x) if has_arg(self[0].reconstruct, 'x') else self[0].reconstruct(t) |
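`has_arg` keeps the call backward compatible with `reconstruct` implementations that don't accept an `x` argument. A minimal stand-in, assuming `has_arg(f, name)` just inspects the signature (which matches its use here; `recon_old`/`recon_new` are made-up examples):

```python
import inspect

def has_arg(func, arg):
    "True if `func` accepts a parameter named `arg`."
    return arg in inspect.signature(func).parameters

def recon_old(t): return t           # older signature, no `x`
def recon_new(t, x=None): return t   # newer signature accepts `x`

assert not has_arg(recon_old, 'x') and has_arg(recon_new, 'x')
```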
Create a new `ItemList` from `items`, keeping the same attributes.
def new(self, items:Iterator, processor:PreProcessors=None, **kwargs)->'ItemList':
"Create a new `ItemList` from `items`, keeping the same attributes."
processor = ifnone(processor, self.processor)
copy_d = {o:getattr(self,o) fo... |
Create an `ItemList` in `path` from the filenames that have a suffix in `extensions`.
`recurse` determines if we search subfolders.
def from_folder(cls, path:PathOrStr, extensions:Collection[str]=None, recurse:bool=True,
include:Optional[Collection[str]]=None, processor:PreProcessors=None, ... |
Create an `ItemList` in `path` from the inputs in the `cols` of `df`.
def from_df(cls, df:DataFrame, path:PathOrStr='.', cols:IntsOrStrs=0, processor:PreProcessors=None, **kwargs)->'ItemList':
"Create an `ItemList` in `path` from the inputs in the `cols` of `df`."
inputs = df.iloc[:,df_names_to_idx(col... |
Create an `ItemList` in `path` from the inputs in the `cols` of `path/csv_name`
def from_csv(cls, path:PathOrStr, csv_name:str, cols:IntsOrStrs=0, delimiter:str=None, header:str='infer',
processor:PreProcessors=None, **kwargs)->'ItemList':
"""Create an `ItemList` in `path` from the inputs in t... |
Use only a sample of `sample_pct` of the full dataset and an optional `seed`.
def use_partial_data(self, sample_pct:float=0.01, seed:int=None)->'ItemList':
"Use only a sample of `sample_pct` of the full dataset and an optional `seed`."
if seed is not None: np.random.seed(seed)
rand_idx = np.rando... |
Save `self.items` to `fn` in `self.path`.
def to_text(self, fn:str):
"Save `self.items` to `fn` in `self.path`."
with open(self.path/fn, 'w') as f: f.writelines([f'{o}\n' for o in self._relative_item_paths()]) |
Only keep elements for which `func` returns `True`.
def filter_by_func(self, func:Callable)->'ItemList':
"Only keep elements for which `func` returns `True`."
self.items = array([o for o in self.items if func(o)])
return self |
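For example, the same keep-if-True logic on a plain list of paths (a hypothetical usage, sketched without fastai):

```python
from pathlib import Path

items = [Path('a.jpg'), Path('b.png'), Path('c.jpg')]
# Same logic as filter_by_func: keep only elements where the predicate is True.
kept = [o for o in items if o.suffix == '.jpg']
assert kept == [Path('a.jpg'), Path('c.jpg')]
```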
Only keep filenames in the `include` folders, or reject the ones in `exclude`.
def filter_by_folder(self, include=None, exclude=None):
"Only keep filenames in the `include` folders, or reject the ones in `exclude`."
include,exclude = listify(include),listify(exclude)
def _inner(o):
if isinstanc... |
Keep random sample of `items` with probability `p` and an optional `seed`.
def filter_by_rand(self, p:float, seed:int=None):
"Keep random sample of `items` with probability `p` and an optional `seed`."
if seed is not None: np.random.seed(seed)
return self.filter_by_func(lambda o: rand_bool(p)) |
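`rand_bool(p)` draws True with probability `p`, so each item is kept independently; seeding makes the subsample reproducible. A standalone sketch of the same idea with NumPy (`rand_keep` is a made-up name):

```python
import numpy as np

def rand_keep(items, p, seed=None):
    "Keep each item independently with probability `p`; `seed` makes it reproducible."
    if seed is not None: np.random.seed(seed)
    return [o for o in items if np.random.rand() < p]

sample = rand_keep(list(range(10_000)), p=0.1, seed=42)
print(len(sample))  # close to 1000
```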
Don't split the data and create an empty validation set.
def split_none(self):
"Don't split the data and create an empty validation set."
val = self[[]]
val.ignore_empty = True
return self._split(self.path, self, val) |
Split the data between `train` and `valid`.
def split_by_list(self, train, valid):
"Split the data between `train` and `valid`."
return self._split(self.path, train, valid) |
Split the data between `train_idx` and `valid_idx`.
def split_by_idxs(self, train_idx, valid_idx):
"Split the data between `train_idx` and `valid_idx`."
return self.split_by_list(self[train_idx], self[valid_idx]) |
Split the data according to the indexes in `valid_idx`.
def split_by_idx(self, valid_idx:Collection[int])->'ItemLists':
"Split the data according to the indexes in `valid_idx`."
#train_idx = [i for i in range_of(self.items) if i not in valid_idx]
train_idx = np.setdiff1d(arange_of(self.items), ... |
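The commented-out list comprehension and `np.setdiff1d` compute the same complement; the NumPy call is just faster on large datasets. For instance:

```python
import numpy as np

valid_idx = [2, 5, 7]
train_idx = np.setdiff1d(np.arange(10), valid_idx)
print(train_idx)  # [0 1 3 4 6 8 9]
```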
Split the data depending on the folder (`train` or `valid`) in which the filenames are.
def split_by_folder(self, train:str='train', valid:str='valid')->'ItemLists':
"Split the data depending on the folder (`train` or `valid`) in which the filenames are."
return self.split_by_idxs(self._get_by_folder(t... |
Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed.
def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists':
"Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed."
if valid_pct==0.: r... |
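A standalone sketch of a random split, assuming the usual permute-then-cut implementation (`random_split_idx` is a made-up name):

```python
import numpy as np

def random_split_idx(n, valid_pct=0.2, seed=None):
    "Permute the indexes, then cut off the first `valid_pct * n` for validation."
    if seed is not None: np.random.seed(seed)
    rand_idx = np.random.permutation(n)
    cut = int(valid_pct * n)
    return rand_idx[cut:], rand_idx[:cut]   # train_idx, valid_idx

train_idx, valid_idx = random_split_idx(100, valid_pct=0.2, seed=0)
assert len(valid_idx) == 20 and not set(train_idx) & set(valid_idx)
```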
Split the items into train set with size `train_size * n` and valid set with size `valid_size * n`.
def split_subsets(self, train_size:float, valid_size:float, seed=None) -> 'ItemLists':
"Split the items into train set with size `train_size * n` and valid set with size `valid_size * n`."
assert 0 < tra... |
Split the data by result of `func` (which returns `True` for validation set).
def split_by_valid_func(self, func:Callable)->'ItemLists':
"Split the data by result of `func` (which returns `True` for validation set)."
valid_idx = [i for i,o in enumerate(self.items) if func(o)]
return self.split_... |
Split the data by using the names in `valid_names` for validation.
def split_by_files(self, valid_names:'ItemList')->'ItemLists':
"Split the data by using the names in `valid_names` for validation."
if isinstance(self.items[0], Path): return self.split_by_valid_func(lambda o: o.name in valid_names)
... |
Split the data by using the names in `fname` for the validation set. `path` will override `self.path`.
def split_by_fname_file(self, fname:PathOrStr, path:PathOrStr=None)->'ItemLists':
"Split the data by using the names in `fname` for the validation set. `path` will override `self.path`."
path = Path(i... |
Split the data from the `col` in the dataframe in `self.inner_df`.
def split_from_df(self, col:IntsOrStrs=2):
"Split the data from the `col` in the dataframe in `self.inner_df`."
valid_idx = np.where(self.inner_df.iloc[:,df_names_to_idx(col, self.inner_df)])[0]
return self.split_by_idx(valid_id... |
Return `label_cls` or guess one from the first element of `labels`.
def get_label_cls(self, labels, label_cls:Callable=None, label_delim:str=None, **kwargs):
"Return `label_cls` or guess one from the first element of `labels`."
if label_cls is not None: return label_cls
if self.la... |
Label `self.items` with `labels`.
def _label_from_list(self, labels:Iterator, label_cls:Callable=None, from_item_lists:bool=False, **kwargs)->'LabelList':
"Label `self.items` with `labels`."
if not from_item_lists:
raise Exception("Your data isn't split, if you don't want a validation set, ... |
Label `self.items` from the values in `cols` in `self.inner_df`.
def label_from_df(self, cols:IntsOrStrs=1, label_cls:Callable=None, **kwargs):
"Label `self.items` from the values in `cols` in `self.inner_df`."
labels = self.inner_df.iloc[:,df_names_to_idx(cols, self.inner_df)]
assert labels.is... |
Label every item with `const`.
def label_const(self, const:Any=0, label_cls:Callable=None, **kwargs)->'LabelList':
"Label every item with `const`."
return self.label_from_func(func=lambda o: const, label_cls=label_cls, **kwargs) |
Label every item with an `EmptyLabel`.
def label_empty(self, **kwargs):
"Label every item with an `EmptyLabel`."
kwargs['label_cls'] = EmptyLabelList
return self.label_from_func(func=lambda o: 0., **kwargs) |
Apply `func` to every input to get its label.
def label_from_func(self, func:Callable, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply `func` to every input to get its label."
return self._label_from_list([func(o) for o in self.items], label_cls=label_cls, **kwargs) |
Give a label to each filename depending on its folder.
def label_from_folder(self, label_cls:Callable=None, **kwargs)->'LabelList':
"Give a label to each filename depending on its folder."
return self.label_from_func(func=lambda o: (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2],
... |
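The labeling function picks the parent-folder name: `parts[-1]` is the filename, so `parts[-2]` is its directory. A quick standalone check (`parent_label` is a made-up name for the lambda above):

```python
import os
from pathlib import Path

def parent_label(o):
    "The label is the name of the immediate parent folder."
    return (o.parts if isinstance(o, Path) else o.split(os.path.sep))[-2]

assert parent_label(Path('data/train/cat/1.jpg')) == 'cat'
assert parent_label(os.path.join('data', 'train', 'dog', '2.jpg')) == 'dog'
```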
Apply the regex in `pat` to determine the label of every filename. If `full_path`, search in the full name.
def label_from_re(self, pat:str, full_path:bool=False, label_cls:Callable=None, **kwargs)->'LabelList':
"Apply the regex in `pat` to determine the label of every filename. If `full_path`, search in the full ... |
Generate classes from `items` by taking the sorted unique values.
def generate_classes(self, items):
"Generate classes from `items` by taking the sorted unique values."
classes = set()
for c in items: classes = classes.union(set(c))
classes = list(classes)
classes.sort()
... |
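Because each item may carry several labels (multi-label data), the classes are the sorted union of all label sets. The same logic in isolation:

```python
labels = [['cat', 'outdoor'], ['dog'], ['cat', 'dog']]
classes = set()
for c in labels: classes = classes.union(set(c))
print(sorted(classes))  # ['cat', 'dog', 'outdoor']
```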
Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default.
def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator, label_cls:Callable=None, **kwargs)->'LabelList':
"Use the labels in `train_labels` and `valid_labels` to label the data. `label... |
Set `tfms` to be applied to the xs of the train and validation set.
def transform(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the xs of the train and validation set."
if not tfms: tfms=(None,None)
assert is_listy(tfms) and len(tfms) == 2, "Pl... |
Set `tfms` to be applied to the ys of the train and validation set.
def transform_y(self, tfms:Optional[Tuple[TfmList,TfmList]]=(None,None), **kwargs):
"Set `tfms` to be applied to the ys of the train and validation set."
if not tfms: tfms=(None,None)
self.train.transform_y(tfms[0], **kwargs)
... |
Read the default class processors if none have been set.
def get_processors(self):
"Read the default class processors if none have been set."
procs_x,procs_y = listify(self.train.x._processor),listify(self.train.y._processor)
xp = ifnone(self.train.x.processor, [p(ds=self.train.x) for p in proc... |
Process the inner datasets.
def process(self):
"Process the inner datasets."
xp,yp = self.get_processors()
for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
#progress_bar clears the outputs, so in some cases warnings issued during processing disappear.
... |
Create a `DataBunch` from self, `path` will override `self.path`, `kwargs` are passed to `DataBunch.create`.
def databunch(self, path:PathOrStr=None, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus,
dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=... |
Create a `LabelLists` with empty sets from the serialized `state`.
def load_state(cls, path:PathOrStr, state:dict):
"Create a `LabelLists` with empty sets from the serialized `state`."
path = Path(path)
train_ds = LabelList.load_state(path, state)
valid_ds = LabelList.load_state(path, s... |
Create a `LabelLists` with empty sets from the serialized file in `path/fn`.
def load_empty(cls, path:PathOrStr, fn:PathOrStr='export.pkl'):
"Create a `LabelLists` with empty sets from the serialized file in `path/fn`."
path = Path(path)
state = torch.load(open(path/fn, 'rb'))
return La... |
For inference, will briefly replace the dataset with one that only contains `item`.
def set_item(self,item):
"For inference, will briefly replace the dataset with one that only contains `item`."
self.item = self.x.process_one(item)
yield None
self.item = None |
Create `pd.DataFrame` containing `items` from `self.x` and `self.y`.
def to_df(self)->None:
"Create `pd.DataFrame` containing `items` from `self.x` and `self.y`."
return pd.DataFrame(dict(x=self.x._relative_item_paths(), y=[str(o) for o in self.y])) |
Save `self.to_df()` to a CSV file in `self.path`/`dest`.
def to_csv(self, dest:str)->None:
"Save `self.to_df()` to a CSV file in `self.path`/`dest`."
self.to_df().to_csv(self.path/dest, index=False) |
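Both helpers reduce to building a two-column DataFrame of inputs and stringified labels and writing it out, e.g. (a minimal sketch with made-up data):

```python
import pandas as pd

xs = ['train/cat/1.jpg', 'train/dog/2.jpg']   # stand-ins for relative item paths
ys = ['cat', 'dog']
df = pd.DataFrame(dict(x=xs, y=[str(o) for o in ys]))
df.to_csv('labels.csv', index=False)
```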
Return the minimal state for export.
def get_state(self, **kwargs):
"Return the minimal state for export."
state = {'x_cls':self.x.__class__, 'x_proc':self.x.processor,
'y_cls':self.y.__class__, 'y_proc':self.y.processor,
'tfms':self.tfms, 'tfm_y':self.tfm_y, 'tfmargs'... |
Export the minimal state and save it in `fn` to load an empty version for inference.
def export(self, fn:PathOrStr, **kwargs):
"Export the minimal state and save it in `fn` to load an empty version for inference."
pickle.dump(self.get_state(**kwargs), open(fn, 'wb')) |
Load the state in `fn` to create an empty `LabelList` for inference.
def load_empty(cls, path:PathOrStr, fn:PathOrStr):
"Load the state in `fn` to create an empty `LabelList` for inference."
return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb'))) |
Create a `LabelList` from `state`.
def load_state(cls, path:PathOrStr, state:dict) -> 'LabelList':
"Create a `LabelList` from `state`."
x = state['x_cls']([], path=path, processor=state['x_proc'], ignore_empty=True)
y = state['y_cls']([], path=path, processor=state['y_proc'], ignore_empty=True)... |
Launch the processing on `self.x` and `self.y` with `xp` and `yp`.
def process(self, xp:PreProcessor=None, yp:PreProcessor=None, name:str=None):
"Launch the processing on `self.x` and `self.y` with `xp` and `yp`."
self.y.process(yp)
if getattr(self.y, 'filter_missing_y', False):
fil... |
Set the `tfms` and `tfm_y` value to be applied to the inputs and targets.
def transform(self, tfms:TfmList, tfm_y:bool=None, **kwargs):
"Set the `tfms` and `tfm_y` value to be applied to the inputs and targets."
_check_kwargs(self.x, tfms, **kwargs)
if tfm_y is None: tfm_y = self.tfm_y
... |
Set `tfms` to be applied to the targets only.
def transform_y(self, tfms:TfmList=None, **kwargs):
"Set `tfms` to be applied to the targets only."
_check_kwargs(self.y, tfms, **kwargs)
self.tfm_y=True
if tfms is None:
self.tfms_y = list(filter(lambda t: t.use_on_y, listify(se... |
Create a new `ItemList` from `items`, keeping the same attributes.
def new(self, item_lists, processor:PreProcessor=None, **kwargs)->'ItemList':
"Create a new `ItemList` from `items`, keeping the same attributes."
processor = ifnone(processor, self.processor)
copy_d = {o:getattr(self,o) for o i... |
Parse the docstring into its components.
:return: a dictionary of form
{
"short_description": ...,
"long_description": ...,
"params": [{"name": ..., "doc": ...}, ...],
"vals": [{"name": ..., "doc": ...}, ...],
"... |
Return the env var value if it's defined and not an empty string, otherwise return Unknown.
def get_env(name):
"Return the env var value if it's defined and not an empty string, otherwise return Unknown."
res = os.environ.get(name,'')
return res if len(res) else "Unknown" |
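Usage, assuming the `get_env` above is in scope (`MY_FLAG` is a made-up variable name):

```python
import os

os.environ['MY_FLAG'] = ''
assert get_env('MY_FLAG') == 'Unknown'   # an empty string falls back to Unknown
os.environ['MY_FLAG'] = 'on'
assert get_env('MY_FLAG') == 'on'
```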
Print user's setup information
def show_install(show_nvidia_smi:bool=False):
"Print user's setup information"
import platform, fastai.version
rep = []
opt_mods = []
rep.append(["=== Software ===", None])
rep.append(["python", platform.python_version()])
rep.append(["fastai", fastai.__ver... |
Check whether module==version is available on PyPI.
def pypi_module_version_is_available(module, version):
"Check whether module==version is available on PyPI"
# returns True/False (or None if it failed to execute the check)
# using a hack: when passing "module==" with no version number to pip
# it "fai... |
Suggest how to improve the setup to speed things up
def check_perf():
"Suggest how to improve the setup to speed things up"
from PIL import features, Image
from packaging import version
print("Running performance checks.")
# libjpeg_turbo check
print("\n*** libjpeg-turbo status")
if vers... |
Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0.
def annealing_linear(start:Number, end:Number, pct:float)->Number:
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start) |
Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0.
def annealing_exp(start:Number, end:Number, pct:float)->Number:
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct |
Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0.
def annealing_cos(start:Number, end:Number, pct:float)->Number:
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out |
Helper function for `annealing_poly`.
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
"Helper function for `annealing_poly`."
return end + (start-end) * (1-pct)**degree |
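The four schedules above agree at the endpoints (`pct=0` gives `start`, `pct=1` gives `end`) and differ only in the path between them. A standalone check, restating the formulas exactly as given:

```python
import numpy as np

def annealing_linear(start, end, pct): return start + pct * (end - start)
def annealing_exp(start, end, pct):    return start * (end / start) ** pct
def annealing_cos(start, end, pct):    return end + (start - end) / 2 * (np.cos(np.pi * pct) + 1)
def do_annealing_poly(start, end, pct, degree): return end + (start - end) * (1 - pct) ** degree

for f in (annealing_linear, annealing_exp, annealing_cos):
    assert np.isclose(f(1e-2, 1e-4, 0.0), 1e-2) and np.isclose(f(1e-2, 1e-4, 1.0), 1e-4)
```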
Create an `optim.Optimizer` from `opt_func` with `lr`. Set lr on `layer_groups`.
def create(cls, opt_func:Union[type,Callable], lr:Union[float,Tuple,List], layer_groups:ModuleList, wd:Floats=0.,
true_wd:bool=False, bn_wd:bool=True)->optim.Optimizer:
"Create an `optim.Optimizer` from `opt_func` ... |
Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters.
def new(self, layer_groups:Collection[nn.Module], split_no_wd:bool=True):
"Create a new `OptimWrapper` from `self` with another `layer_groups` but the same hyper-parameters."
opt_func = getattr(self, 'opt... |
Create a new `OptimWrapper` from `self` with other `param_groups` but the same hyper-parameters.
def new_with_params(self, param_groups:Collection[Collection[nn.Parameter]]):
"Create a new `OptimWrapper` from `self` with other `param_groups` but the same hyper-parameters."
opt_func = getattr(self, ... |
Set weight decay and step optimizer.
def step(self)->None:
"Set weight decay and step optimizer."
# weight decay outside of optimizer step (AdamW)
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for... |
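With `true_wd`, weight decay is applied directly to the weights before the gradient step (the AdamW recipe, `w <- w * (1 - lr*wd)`) instead of being folded into the gradients. A simplified sketch in plain PyTorch, ignoring the per-layer-group bookkeeping above (`decoupled_wd_step` is a made-up name):

```python
import torch

def decoupled_wd_step(opt: torch.optim.Optimizer, lr: float, wd: float):
    "Decay the weights directly, then step with no built-in decay."
    for pg in opt.param_groups:
        for p in pg['params']:
            if p.grad is not None: p.data.mul_(1 - lr * wd)
        pg['weight_decay'] = 0.0   # make sure the step itself adds no decay
    opt.step()
```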
Set beta (or alpha, whichever makes sense for the given optimizer).
def beta(self, val:float)->None:
"Set beta (or alpha, whichever makes sense for the given optimizer)."
if val is None: return
if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt... |
Set weight decay.
def wd(self, val:float)->None:
"Set weight decay."
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd) |
Read the values inside the optimizer for the hyper-parameters.
def read_defaults(self)->None:
"Read the values inside the optimizer for the hyper-parameters."
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read... |
Set `val` inside the optimizer dictionary at `key`.
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
"Set `val` inside the optimizer dictionary at `key`."
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_gr... |
Read a hyperparameter `key` in the optimizer dictionary.
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
"Read a hyperparameter `key` in the optimizer dictionary."
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in va... |
Return the inner state minus the layer groups.
def get_state(self):
"Return the inner state minus the layer groups."
return {'opt_state':self.opt.state_dict(), 'lr':self._lr, 'wd':self._wd, 'beta':self._beta, 'mom':self._mom,
'opt_func':self.opt_func, 'true_wd':self.true_wd, 'bn_wd':sel... |
Return the inner state of the `Callback`, `minimal` or not.
def get_state(self, minimal:bool=True):
"Return the inner state of the `Callback`, `minimal` or not."
to_remove = ['exclude', 'not_min'] + getattr(self, 'exclude', []).copy()
if minimal: to_remove += getattr(self, 'not_min', []).copy()... |
Add `val` to calculate the updated smoothed value.
def add_value(self, val:float)->None:
"Add `val` to calculate the updated smoothed value."
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n) |
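This is an exponential moving average with bias correction: dividing by `1 - beta**n` removes the startup bias toward the zero initialization (the same correction Adam uses). In isolation (`SmoothValue` is a standalone sketch, not the fastai class):

```python
class SmoothValue:
    "Exponential moving average with Adam-style bias correction."
    def __init__(self, beta=0.98): self.beta,self.n,self.mov_avg = beta,0,0.
    def add_value(self, val):
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)

s = SmoothValue()
s.add_value(5.0)
assert abs(s.smooth - 5.0) < 1e-9   # without the correction this first value would read 0.1
```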
Update metric computation with `last_output` and `last_target`.
def on_batch_end(self, last_output, last_target, **kwargs):
"Update metric computation with `last_output` and `last_target`."
if not is_listy(last_target): last_target=[last_target]
self.count += last_target[0].size(0)
val ... |
Set the final result in `last_metrics`.
def on_epoch_end(self, last_metrics, **kwargs):
"Set the final result in `last_metrics`."
return add_metrics(last_metrics, self.val/self.count) |
Return next value along annealed schedule.
def step(self)->Number:
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter) |
Build anneal schedule for all of the parameters.
def steps(self, *steps_cfg:StartOptEnd):
"Build anneal schedule for all of the parameters."
return [Scheduler(step, n_iter, func=func)
for (step,(n_iter,func)) in zip(steps_cfg, self.phases)] |
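A scheduler just walks `pct = n/n_iter` from 0 to 1 through one of the annealing functions. A minimal standalone version of that stepping logic (`MiniScheduler` is a made-up name, not the fastai `Scheduler`):

```python
def annealing_linear(start, end, pct): return start + pct * (end - start)

class MiniScheduler:
    "Walk `pct = n/n_iter` from 0 to 1 through an annealing function."
    def __init__(self, start, end, n_iter, func):
        self.start,self.end,self.n_iter,self.func,self.n = start,end,n_iter,func,0
    def step(self):
        self.n += 1
        return self.func(self.start, self.end, self.n / self.n_iter)

sched = MiniScheduler(1e-2, 1e-4, n_iter=100, func=annealing_linear)
vals = [sched.step() for _ in range(100)]
assert abs(vals[-1] - 1e-4) < 1e-12
```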
Initialize our optimization params based on our annealing schedule.
def on_train_begin(self, n_epochs:int, epoch:int, **kwargs:Any)->None:
"Initialize our optimization params based on our annealing schedule."
res = {'epoch':self.start_epoch} if self.start_epoch is not None else None
self.start_... |
Take one step forward on the annealing schedule for the optim params.
def on_batch_end(self, train, **kwargs:Any)->None:
"Take one step forward on the annealing schedule for the optim params."
if train:
if self.idx_s >= len(self.lr_scheds): return {'stop_training': True, 'stop_epoch': True}... |
Distributed training of Imagenette.
Fastest multi-GPU speed is achieved by running with: python -m fastai.launch
def main(
gpu:Param("GPU to run on", str)=None,
lr: Param("Learning rate", float)=1e-3,
size: Param("Size (px: 128,192,224)", int)=128,
debias_mom: Param("Debias statistics", bool... |
A basic critic for images `n_channels` x `in_size` x `in_size`.
def basic_critic(in_size:int, n_channels:int, n_features:int=64, n_extra_layers:int=0, **conv_kwargs):
"A basic critic for images `n_channels` x `in_size` x `in_size`."
layers = [conv_layer(n_channels, n_features, 4, 2, 1, leaky=0.2, norm_type=Non... |
A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`.
def basic_generator(in_size:int, n_channels:int, noise_sz:int=100, n_features:int=64, n_extra_layers=0, **conv_kwargs):
"A basic generator from `noise_sz` to images `n_channels` x `in_size` x `in_size`."
cur_size, cur_ftrs = 4, n... |
Define loss functions for a GAN from `loss_gen` and `loss_crit`.
def gan_loss_from_func(loss_gen, loss_crit, weights_gen:Tuple[float,float]=None):
"Define loss functions for a GAN from `loss_gen` and `loss_crit`."
def _loss_G(fake_pred, output, target, weights_gen=weights_gen):
ones = fake_pred.new_one... |
Critic to train a `GAN`.
def gan_critic(n_channels:int=3, nf:int=128, n_blocks:int=3, p:int=0.15):
"Critic to train a `GAN`."
layers = [
_conv(n_channels, nf, ks=4, stride=2),
nn.Dropout2d(p/2),
res_block(nf, dense=True,**_conv_args)]
nf *= 2 # after dense block
for i in range(n... |
Compute accuracy after expanding `y_true` to the size of `y_pred`.
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
"Compute accuracy after expanding `y_true` to the size of `y_pred`."
if sigmoid: y_pred = y_pred.sigmoid()
return ((y_pred>thresh)==... |
Put the model in generator mode if `gen_mode`, in critic mode otherwise.
def switch(self, gen_mode:bool=None):
"Put the model in generator mode if `gen_mode`, in critic mode otherwise."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode |
Evaluate the `output` with the critic, then use `self.loss_funcG` to combine it with `target`.
def generator(self, output, target):
"Evaluate the `output` with the critic, then use `self.loss_funcG` to combine it with `target`."
fake_pred = self.gan_model.critic(output)
return self.loss_funcG(f... |
Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.loss_funcD`.
def critic(self, real_pred, input):
"Create some `fake_pred` with the generator from `input` and compare them to `real_pred` in `self.loss_funcD`."
fake = self.gan_model.generator(input.require... |
Create the optimizers for the generator and critic if necessary, initialize smootheners.
def on_train_begin(self, **kwargs):
"Create the optimizers for the generator and critic if necessary, initialize smootheners."
if not getattr(self,'opt_gen',None):
self.opt_gen = self.opt.new([nn.Sequen... |
Clamp the weights with `self.clip` if it's not None, and return the correct input.
def on_batch_begin(self, last_input, last_target, **kwargs):
"Clamp the weights with `self.clip` if it's not None, and return the correct input."
if self.clip is not None:
for p in self.critic.parameters(): p.data.cl... |
Record `last_loss` in the proper list.
def on_backward_begin(self, last_loss, last_output, **kwargs):
"Record `last_loss` in the proper list."
last_loss = last_loss.detach().cpu()
if self.gen_mode:
self.smoothenerG.add_value(last_loss)
self.glosses.append(self.smoothener... |
Put the various losses in the recorder and show a sample image.
def on_epoch_end(self, pbar, epoch, last_metrics, **kwargs):
"Put the various losses in the recorder and show a sample image."
if not hasattr(self, 'last_gen') or not self.show_img: return
data = self.learn.data
img = self.... |
Switch the model, if `gen_mode` is provided, in the desired mode.
def switch(self, gen_mode:bool=None):
"Switch the model, if `gen_mode` is provided, in the desired mode."
self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
self.opt.opt = self.opt_gen.opt if self.gen_mode else... |
Switch the model if necessary.
def on_batch_end(self, iteration, **kwargs):
"Switch the model if necessary."
if self.learn.gan_trainer.gen_mode:
self.n_g += 1
n_iter,n_in,n_out = self.n_gen,self.n_c,self.n_g
else:
self.n_c += 1
n_iter,n_in,n_out =... |
Create a GAN from `learn_gen` and `learn_crit`.
def from_learners(cls, learn_gen:Learner, learn_crit:Learner, switcher:Callback=None,
weights_gen:Tuple[float,float]=None, **learn_kwargs):
"Create a GAN from `learn_gen` and `learn_crit`."
losses = gan_loss_from_func(learn_gen.loss_... |
Create a WGAN from `data`, `generator` and `critic`.
def wgan(cls, data:DataBunch, generator:nn.Module, critic:nn.Module, switcher:Callback=None, clip:float=0.01, **learn_kwargs):
"Create a WGAN from `data`, `generator` and `critic`."
return cls(data, generator, critic, NoopLoss(), WassersteinLoss(), s... |
Show `ys` (target images) on a figure of `figsize`.
def show_xys(self, xs, ys, imgsize:int=4, figsize:Optional[Tuple[int,int]]=None, **kwargs):
"Show `ys` (target images) on a figure of `figsize`."
super().show_xys(ys, xs, imgsize=imgsize, figsize=figsize, **kwargs) |
Multiply the current lr if necessary.
def on_batch_begin(self, train, **kwargs):
"Multiply the current lr if necessary."
if not self.learn.gan_trainer.gen_mode and train: self.learn.opt.lr *= self.mult_lr |
Put the LR back to its value if necessary.
def on_step_end(self, **kwargs):
"Put the LR back to its value if necessary."
if not self.learn.gan_trainer.gen_mode: self.learn.opt.lr /= self.mult_lr |
Get the indexes of the layers where the size of the activation changes.
def _get_sfs_idxs(sizes:Sizes) -> List[int]:
"Get the indexes of the layers where the size of the activation changes."
feature_szs = [size[-1] for size in sizes]
sfs_idxs = list(np.where(np.array(feature_szs[:-1]) != np.array(feature_s... |
Search for `n_images` images on Google, matching `search_term` and `size` requirements,
download them into `path`/`search_term` and verify them, using `max_workers` threads.
def download_google_images(path:PathOrStr, search_term:str, size:str='>400*300', n_images:int=10, format:str='jpg',
... |