2 changes: 1 addition & 1 deletion setup.py
@@ -97,7 +97,7 @@
"filelock",
"flax>=0.4.1",
"hf-doc-builder>=0.3.0",
"huggingface-hub>=0.13.2",
"huggingface-hub==0.24.7",
"requests-mock==1.10.0",
"importlib_metadata",
"invisible-watermark>=0.2.0",
5 changes: 1 addition & 4 deletions src/maxdiffusion/max_utils.py
@@ -397,8 +397,7 @@ def setup_initial_state(
   if not state:
     max_logging.log(f"Could not find the item in orbax, creating state...")
 
-    init_train_state_partial = functools.partial(
-        init_train_state,
+    state = init_train_state(
         model=model,
         tx=tx,
         weights_init_fn=weights_init_fn,
@@ -407,8 +406,6 @@
         eval_only=False,
     )
 
-    state = jax.jit(init_train_state_partial, in_shardings=None, out_shardings=state_mesh_shardings)()
-
Collaborator:
@wang2yn84 the jit call here shards the model according to the sharding specification in the config YAML. In DDP this won't make a difference since the state is replicated, but removing it would prevent running FSDP or other sharding configurations.


   state = unbox_logicallypartioned_trainstate(state)
 
   return state, state_mesh_shardings
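
For context on the reviewer's sharding point, here is a minimal, hypothetical JAX sketch (not maxdiffusion's code; `init_fn`, the `fsdp` axis name, and the array shapes are assumptions) of how `jax.jit` with `out_shardings` materializes the initialized state already laid out across the mesh, whereas calling the init function eagerly yields an unsharded state:

```python
# Hypothetical sketch, not maxdiffusion code: illustrates why the removed
# jax.jit(..., out_shardings=...) call matters for FSDP.
import numpy as np
import jax
import jax.numpy as jnp
from jax.sharding import Mesh, NamedSharding, PartitionSpec

# Build a 1-D device mesh; 'fsdp' is an assumed axis name.
mesh = Mesh(np.array(jax.devices()), axis_names=("fsdp",))

def init_fn():
  # Stand-in for init_train_state: materializes the parameter tree.
  return {"w": jnp.zeros((8, 1024))}  # first dim assumed divisible by device count

# Shard each array's first axis across the 'fsdp' mesh axis.
shardings = {"w": NamedSharding(mesh, PartitionSpec("fsdp", None))}

# Under jit, XLA allocates the output directly with this layout, so each
# device holds only its own shard of the state.
sharded = jax.jit(init_fn, in_shardings=None, out_shardings=shardings)()
print(sharded["w"].sharding)   # NamedSharding with PartitionSpec('fsdp', None)

# Calling init_fn() eagerly instead returns a single-device array: fine when
# DDP replicates the state anyway, but it defeats FSDP-style sharding.
eager = init_fn()
print(eager["w"].sharding)     # SingleDeviceSharding
```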