diff --git a/examples/multi_gpu/pcqm4m_ogb.py b/examples/multi_gpu/pcqm4m_ogb.py
index 10e190251699..2bce247e5d0b 100644
--- a/examples/multi_gpu/pcqm4m_ogb.py
+++ b/examples/multi_gpu/pcqm4m_ogb.py
@@ -117,7 +117,7 @@ def __init__(self, num_layers, emb_dim, drop_ratio=0.5, JK="last",
             num_layers (int): number of GNN message passing layers.
             residual (bool): whether to add residual connection.
             drop_ratio (float): dropout ratio.
-            JK (str): "last" or "sum" to choose JK concat strat.
+            JK (str): "last" or "sum" to choose the JK concat strategy.
             residual (bool): Whether or not to add the residual
             gnn_type (str): Type of GNN to use.
         """
@@ -302,8 +302,8 @@ def __init__(
             virtual_node (bool): whether to add virtual node or not.
             residual (bool): Whether or not to add the residual
             drop_ratio (float): dropout ratio.
-            JK (str): "last" or "sum" to choose JK concat strat.
-            graph_pooling (str): Graph pooling strat to use.
+            JK (str): "last" or "sum" to choose the JK concat strategy.
+            graph_pooling (str): Graph pooling strategy to use.
         """
         super().__init__()
         if num_layers < 2:
diff --git a/examples/randlanet_classification.py b/examples/randlanet_classification.py
index 213bf957bbdb..0d814974e3a8 100644
--- a/examples/randlanet_classification.py
+++ b/examples/randlanet_classification.py
@@ -62,9 +62,9 @@ def message(self, x_j: Tensor, pos_i: Tensor, pos_j: Tensor,
         """Local Spatial Encoding (locSE) and attentive pooling of features.

         Args:
-            x_j (Tensor): neighboors features (K,d)
+            x_j (Tensor): neighbors' features (K,d)
             pos_i (Tensor): centroid position (repeated) (K,3)
-            pos_j (Tensor): neighboors positions (K,3)
+            pos_j (Tensor): neighbors' positions (K,3)
             index (Tensor): index of centroid positions
                 (e.g. [0,...,0,1,...,1,...,N,...,N])
diff --git a/graphgym/custom_graphgym/optimizer/example.py b/graphgym/custom_graphgym/optimizer/example.py
index 0b2c88a4c86b..dd5c869755b5 100644
--- a/graphgym/custom_graphgym/optimizer/example.py
+++ b/graphgym/custom_graphgym/optimizer/example.py
@@ -13,7 +13,7 @@ def adagrad_optimizer(params: Iterator[Parameter], base_lr: float,
     return Adagrad(params, lr=base_lr, weight_decay=weight_decay)


-@register.register_scheduler('pleateau')
+@register.register_scheduler('plateau')
 def plateau_scheduler(optimizer: Optimizer, patience: int,
                       lr_decay: float) -> ReduceLROnPlateau:
     return ReduceLROnPlateau(optimizer, patience=patience, factor=lr_decay)
diff --git a/torch_geometric/data/lightning/datamodule.py b/torch_geometric/data/lightning/datamodule.py
index 7f0783d07398..499a6d52e981 100644
--- a/torch_geometric/data/lightning/datamodule.py
+++ b/torch_geometric/data/lightning/datamodule.py
@@ -124,7 +124,7 @@ def __init__(

         if loader in ['neighbor', 'link_neighbor']:

-            # Define a new `NeighborSampler` to be re-used across data loaders:
+            # Define a new `NeighborSampler` to be reused across data loaders:
             sampler_kwargs, self.loader_kwargs = split_kwargs(
                 self.kwargs,
                 NeighborSampler,
diff --git a/torch_geometric/graphgym/models/gnn.py b/torch_geometric/graphgym/models/gnn.py
index a011b149523d..8048f3f32dd5 100644
--- a/torch_geometric/graphgym/models/gnn.py
+++ b/torch_geometric/graphgym/models/gnn.py
@@ -119,7 +119,7 @@ def __init__(self, dim_in: int):
                     has_bias=False,
                     cfg=cfg,
                 ))
-            # Update `dim_in` to reflect the new dimension fo the node features
+            # Update `dim_in` to reflect the new dimension of the node features
             self.dim_in = cfg.gnn.dim_inner
         if cfg.dataset.edge_encoder:
             # Encode integer edge features via `torch.nn.Embedding`:
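
Note on the two pcqm4m_ogb.py hunks: they expand "strat" to "strategy" in the docstrings. For readers unfamiliar with the term, here is a minimal sketch of the two JK (jumping knowledge) strategies the docstring names, assuming `h_list` holds per-layer node embeddings of identical shape; the helper `jk_aggregate` is hypothetical and not part of the example file:

import torch

def jk_aggregate(h_list, JK="last"):
    # Hypothetical helper mirroring the docstring's two options.
    if JK == "last":
        # "last": keep only the final layer's node embeddings.
        return h_list[-1]
    if JK == "sum":
        # "sum": element-wise sum of the embeddings from every layer.
        return torch.stack(h_list, dim=0).sum(dim=0)
    raise ValueError(f"unknown JK strategy: {JK}")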
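
Note on the randlanet_classification.py hunk: it only fixes the "neighboors" spelling, but the shapes the docstring documents are easier to follow with a concrete sketch. The following is a rough illustration of locSE-style relative position encoding plus a per-centroid attention softmax, not the example's actual implementation (which also applies learned MLPs); all tensor values are toy data:

import torch
from torch_geometric.utils import softmax

K, d = 10, 4                                # K neighbor pairs, d feature channels
x_j = torch.randn(K, d)                     # neighbors' features (K, d)
pos_i = torch.randn(2, 3).repeat_interleave(5, dim=0)  # centroids, repeated (K, 3)
pos_j = torch.randn(K, 3)                   # neighbors' positions (K, 3)
index = torch.tensor([0] * 5 + [1] * 5)     # centroid id per neighbor

# locSE: encode each neighbor by absolute positions, offset, and distance.
rel = pos_i - pos_j
dist = rel.norm(dim=-1, keepdim=True)
encoding = torch.cat([pos_i, pos_j, rel, dist], dim=-1)  # (K, 10)

# Attentive pooling: scores are normalized per centroid via a scatter softmax,
# and the weighted features would then be scatter-summed per centroid.
scores = softmax(x_j, index)                # each centroid's scores sum to 1
weighted = scores * x_j                     # (K, d)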
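
Note on the optimizer/example.py hunk: it corrects the registration key from 'pleateau' to 'plateau', so GraphGym configs that select this scheduler by name must use the fixed spelling. A minimal sketch of what the registered factory returns, assuming a plain Adam optimizer stands in for whatever GraphGym builds; the single tensor is a stand-in parameter:

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import ReduceLROnPlateau

params = [torch.zeros(1, requires_grad=True)]  # stand-in model parameter
optimizer = Adam(params, lr=0.01)

# Equivalent to plateau_scheduler(optimizer, patience=10, lr_decay=0.5):
scheduler = ReduceLROnPlateau(optimizer, patience=10, factor=0.5)

# ReduceLROnPlateau.step() takes the monitored metric (e.g. validation loss):
scheduler.step(0.42)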
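
Note on the datamodule.py hunk: it only rewords a comment, but the pattern that comment describes is the point: one NeighborSampler is constructed once and shared by all of the datamodule's loaders, so sampling metadata is not rebuilt per split. A minimal sketch of that reuse on a toy graph; the num_neighbors values and node splits are illustrative, not taken from the datamodule:

import torch
from torch_geometric.data import Data
from torch_geometric.loader import NodeLoader
from torch_geometric.sampler import NeighborSampler

# Toy graph; any `Data` object with an `edge_index` works here.
data = Data(x=torch.randn(6, 8),
            edge_index=torch.tensor([[0, 1, 2, 3, 4], [1, 2, 3, 4, 5]]))

sampler = NeighborSampler(data, num_neighbors=[2, 2])  # built once

# The same sampler instance backs every split's loader:
train_loader = NodeLoader(data, node_sampler=sampler,
                          input_nodes=torch.arange(4), batch_size=2,
                          shuffle=True)
val_loader = NodeLoader(data, node_sampler=sampler,
                        input_nodes=torch.tensor([4, 5]), batch_size=2)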