From 2367e456377d7777b3c985b7702010eea0af5312 Mon Sep 17 00:00:00 2001
From: Kenneth Lamar
Date: Wed, 13 May 2020 17:30:58 -0400
Subject: [PATCH 1/4] Enhanced Colab notebook

Added support for priming the model.
Data now persists in Google Drive.
Added the ability to resume from checkpoints.
---
 jukebox/Interacting_with_Jukebox.ipynb | 306 +++++++++++++++++++++----
 1 file changed, 264 insertions(+), 42 deletions(-)

diff --git a/jukebox/Interacting_with_Jukebox.ipynb b/jukebox/Interacting_with_Jukebox.ipynb
index 616504d4eb..540990acbd 100644
--- a/jukebox/Interacting_with_Jukebox.ipynb
+++ b/jukebox/Interacting_with_Jukebox.ipynb
@@ -15,15 +15,32 @@
 "accelerator": "GPU"
 },
 "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "uq8uLwZCn0BV",
+ "colab_type": "text"
+ },
+ "source": [
+ "IMPORTANT NOTE ON SYSTEM REQUIREMENTS:\n",
+ "\n",
+ "If you are connecting to a hosted runtime, make sure it has a P100 GPU (optionally run !nvidia-smi to confirm). Go to Edit>Notebook Settings to set this.\n",
+ "\n",
+ "Colab may first assign you a lower-memory machine if you are using a hosted runtime. If so, the first time you try to load the 5B model, it will run out of memory, and then you'll be prompted to restart with more memory (then return to the top of this Colab). If you continue to have memory issues after this (or run into issues on your own home setup), switch to the 1B model.\n",
+ "\n",
+ "If you are using a local GPU, we recommend a V100 or P100 with 16GB of GPU memory for best performance. For GPUs with less memory, we recommend using the 1B model and a smaller batch size throughout. \n",
+ "\n"
+ ]
+ },
 {
 "cell_type": "code",
 "metadata": {
- "id": "sAdFGF-bqVMY",
+ "id": "8qEqdj8u0gdN",
 "colab_type": "code",
 "colab": {}
 },
 "source": [
- "!pip install git+https://github.com/openai/jukebox.git"
+ "!nvidia-smi -L"
 ],
 "execution_count": 0,
 "outputs": []
@@ -31,29 +48,46 @@
 {
 "cell_type": "markdown",
 "metadata": {
- "id": "uq8uLwZCn0BV",
+ "id": "VAMZK4GNA_PM",
 "colab_type": "text"
 },
 "source": [
- "IMPORTANT NOTE ON SYSTEM REQUIREMENTS:\n",
- "\n",
- "If you are connecting to a hosted runtime, make sure it has a P100 GPU (optionally run !nvidia-smi to confirm). Go to Edit>Notebook Settings to set this.\n",
- "\n",
- "CoLab may first assign you a lower memory machine if you are using a hosted runtime. If so, the first time you try to load the 5B model, it will run out of memory, and then you'll be prompted to restart with more memory (then return to the top of this CoLab). If you continue to have memory issues after this (or run into issues on your own home setup), switch to the 1B model.\n",
- "\n",
- "If you are using a local GPU, we recommend V100 or P100 with 16GB GPU memory for best performance. For GPU’s with less memory, we recommend using the 1B model and a smaller batch size throughout. \n",
- "\n"
+ "Mount Google Drive to save sample levels as they are generated."
 ]
 },
 {
 "cell_type": "code",
 "metadata": {
- "id": "8qEqdj8u0gdN",
+ "id": "ZPdMgaH_BPGN",
 "colab_type": "code",
 "colab": {}
 },
 "source": [
- "!nvidia-smi"
+ "from google.colab import drive\n",
+ "drive.mount('/content/gdrive')"
 ],
 "execution_count": 0,
 "outputs": []
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Zy4Rehq9ZKv_",
+ "colab_type": "text"
+ },
+ "source": [
+ "Prepare the environment.\n",
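+ "\n",
+ "Optionally, before installing, confirm the runtime's GPU memory matches the recommendation above. This is an editorial sketch, not part of the original notebook; torch comes preinstalled on Colab:\n",
+ "\n",
+ "    import torch as t\n",
+ "    print(t.cuda.get_device_name(0))\n",
+ "    print(round(t.cuda.get_device_properties(0).total_memory / 2**30, 1), 'GiB')"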
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "sAdFGF-bqVMY",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "source": [
+ "!pip install git+https://github.com/openai/jukebox.git"
 ],
 "execution_count": 0,
 "outputs": []
@@ -74,7 +108,8 @@
 "from jukebox.make_models import make_vqvae, make_prior, MODELS, make_model\n",
 "from jukebox.hparams import Hyperparams, setup_hparams\n",
 "from jukebox.sample import sample_single_window, _sample, \\\n",
- " sample_partial_window, upsample\n",
+ " sample_partial_window, upsample, \\\n",
+ " load_prompts\n",
 "from jukebox.utils.dist_utils import setup_dist_from_mpi\n",
 "from jukebox.utils.torch_utils import empty_cache\n",
 "rank, local_rank, device = setup_dist_from_mpi()"
@@ -100,11 +135,13 @@
 "colab": {}
 },
 "source": [
- "model = \"5b_lyrics\" # or \"1b_lyrics\" \n",
+ "model = \"5b_lyrics\" # or \"1b_lyrics\"\n",
 "hps = Hyperparams()\n",
 "hps.sr = 44100\n",
 "hps.n_samples = 3 if model=='5b_lyrics' else 8\n",
- "hps.name = 'samples'\n",
+ "# Specifies the directory to save the sample in.\n",
+ "# We set this to the Google Drive mount point.\n",
+ "hps.name = '/content/gdrive/My Drive/samples'\n",
 "chunk_size = 16 if model==\"5b_lyrics\" else 32\n",
 "max_batch_size = 3 if model==\"5b_lyrics\" else 16\n",
 "hps.levels = 3\n",
@@ -112,8 +149,112 @@
 "\n",
 "vqvae, *priors = MODELS[model]\n",
 "vqvae = make_vqvae(setup_hparams(vqvae, dict(sample_length = 1048576)), device)\n",
- "top_prior = make_prior(setup_hparams(priors[-1], dict()), vqvae, device)\n",
- "\n"
+ "top_prior = make_prior(setup_hparams(priors[-1], dict()), vqvae, device)"
 ],
 "execution_count": 0,
 "outputs": []
 },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rvf-5pnjbmI1",
+ "colab_type": "text"
+ },
+ "source": [
+ "# Select mode\n",
+ "Run one of these cells to select the desired mode."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "VVOQ3egdj65y",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "source": [
+ "# The default mode of operation.\n",
+ "# Creates songs based on artist and genre conditioning.\n",
+ "mode = 'ancestral'\n",
+ "codes_file=None\n",
+ "audio_file=None\n",
+ "prompt_length_in_seconds=None"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "cell_type": "code",
+ "metadata": {
+ "id": "Vqqv2rJKkMXd",
+ "colab_type": "code",
+ "colab": {}
+ },
+ "source": [
+ "# Prime song creation using an arbitrary audio sample.\n",
+ "mode = 'primed'\n",
+ "codes_file=None\n",
+ "# Specify an audio file here.\n",
+ "audio_file = '/content/gdrive/My Drive/primer.wav'\n",
+ "# Specify how many seconds of audio to prime on.\n",
+ "prompt_length_in_seconds=12"
+ ],
+ "execution_count": 0,
+ "outputs": []
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "OxZMi-S3cT2b",
+ "colab_type": "text"
+ },
+ "source": [
+ "Run this cell to automatically resume from the latest checkpoint file, but only if the checkpoint file exists.\n",
+ "This will override the selected mode.\n",
+ "We will assume the existence of a checkpoint means generation is complete and it's time for upsampling to occur.\n",
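+ "\n",
+ "For reference, a rough editorial sketch of inspecting a checkpoint by hand before resuming. It assumes, consistent with how the loading cell below reads data['zs'], that each data.pth.tar holds a dict whose 'zs' entry contains the generated codes:\n",
+ "\n",
+ "    import torch as t\n",
+ "    data = t.load('/content/gdrive/My Drive/samples/level_2/data.pth.tar', map_location='cpu')\n",
+ "    print(list(data.keys()))              # expect 'zs' among the keys\n",
+ "    print([z.shape for z in data['zs']])  # one code tensor per level"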
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GjRwyTDhbvf-", + "colab_type": "code", + "colab": {} + }, + "source": [ + "if os.path.exists(hps.name):\n", + " # Identify the lowest level generated and continue from there.\n", + " for level in [1, 2]:\n", + " data = f\"{hps.name}/level_{level}/data.pth.tar\"\n", + " if os.path.isfile(data):\n", + " mode = 'upsample'\n", + " codes_file = data\n", + " print('Upsampling from level '+str(level))\n", + " break\n", + "print('mode is now '+mode)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UA2UhOZ4YfZj", + "colab_type": "text" + }, + "source": [ + "Run the cell below regardless of which mode you chose." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Jp7nKnCmk1bx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "sample_hps = Hyperparams(dict(mode=mode, codes_file=codes_file, audio_file=audio_file, prompt_length_in_seconds=prompt_length_in_seconds))" ], "execution_count": 0, "outputs": [] @@ -136,12 +277,11 @@ "colab": {} }, "source": [ - "sample_length_in_seconds = 60 # Full length of musical sample to generate - we find songs in the 1 to 4 minute\n", + "sample_length_in_seconds = 71 # Full length of musical sample to generate - we find songs in the 1 to 4 minute\n", " # range work well, with generation time proportional to sample length. \n", " # This total length affects how quickly the model \n", " # progresses through lyrics (model also generates differently\n", " # depending on if it thinks it's in the beginning, middle, or end of sample)\n", - "\n", "hps.sample_length = (int(sample_length_in_seconds*hps.sr)//top_prior.raw_to_tokens)*top_prior.raw_to_tokens\n", "assert hps.sample_length >= top_prior.n_ctx*top_prior.raw_to_tokens, f'Please choose a larger sampling rate'" ], @@ -156,25 +296,87 @@ "colab": {} }, "source": [ - "metas = [dict(artist = \"Zac Brown Band\",\n", - " genre = \"Country\",\n", + "# Note: Metas can contain different prompts per sample.\n", + "# By default, all samples use the same prompt.\n", + "metas = [dict(artist = \"Rick Astley\",\n", + " genre = \"Pop\",\n", " total_length = hps.sample_length,\n", " offset = 0,\n", - " lyrics = \"\"\"I met a traveller from an antique land,\n", - " Who said—“Two vast and trunkless legs of stone\n", - " Stand in the desert. . . . Near them, on the sand,\n", - " Half sunk a shattered visage lies, whose frown,\n", - " And wrinkled lip, and sneer of cold command,\n", - " Tell that its sculptor well those passions read\n", - " Which yet survive, stamped on these lifeless things,\n", - " The hand that mocked them, and the heart that fed;\n", - " And on the pedestal, these words appear:\n", - " My name is Ozymandias, King of Kings;\n", - " Look on my Works, ye Mighty, and despair!\n", - " Nothing beside remains. 
Round the decay\n", - " Of that colossal Wreck, boundless and bare\n", - " The lone and level sands stretch far away\n", - " \"\"\",\n", + " lyrics = \"\"\"We're no strangers to love\n", + "You know the rules and so do I\n", + "A full commitment's what I'm thinking of\n", + "You wouldn't get this from any other guy\n", + "\n", + "I just wanna tell you how I'm feeling\n", + "Gotta make you understand\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\n", + "We've known each other for so long\n", + "Your heart's been aching, but\n", + "You're too shy to say it\n", + "Inside, we both know what's been going on\n", + "We know the game and we're gonna play it\n", + "\n", + "And if you ask me how I'm feeling\n", + "Don't tell me you're too blind to see\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\n", + "(Ooh, give you up)\n", + "(Ooh, give you up)\n", + "Never gonna give, never gonna give\n", + "(Give you up)\n", + "Never gonna give, never gonna give\n", + "(Give you up)\n", + "\n", + "We've known each other for so long\n", + "Your heart's been aching, but\n", + "You're too shy to say it\n", + "Inside, we both know what's been going on\n", + "We know the game and we're gonna play it\n", + "\n", + "I just wanna tell you how I'm feeling\n", + "Gotta make you understand\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\n", + "Never gonna give you up\n", + "Never gonna let you down\n", + "Never gonna run around and desert you\n", + "Never gonna make you cry\n", + "Never gonna say goodbye\n", + "Never gonna tell a lie and hurt you\n", + "\"\"\",\n", " ),\n", " ] * hps.n_samples\n", "labels = [None, None, top_prior.labeller.get_batch_labels(metas, 'cuda')]" @@ -233,13 +435,31 @@ { "cell_type": "code", "metadata": { - "id": "2nET_YBEopyp", + "id": "9a1tlvcVlHhN", "colab_type": "code", "colab": {} }, "source": [ - "zs = [t.zeros(hps.n_samples,0,dtype=t.long, device='cuda') for _ in range(len(priors))]\n", - "zs = _sample(zs, labels, sampling_kwargs, [None, None, top_prior], [2], hps)" + "if sample_hps.mode == 'ancestral':\n", + " zs = [t.zeros(hps.n_samples,0,dtype=t.long, device='cuda') for _ in range(len(priors))]\n", + " zs = _sample(zs, labels, sampling_kwargs, [None, None, top_prior], [2], hps)\n", + "elif sample_hps.mode == 'upsample':\n", + " assert sample_hps.codes_file is not None\n", + " # Load codes.\n", + " data = t.load(sample_hps.codes_file, map_location='cpu')\n", + " zs = [z.cuda() for z in data['zs']]\n", + " assert zs[-1].shape[0] == hps.n_samples, f\"Expected bs = {hps.n_samples}, got {zs[-1].shape[0]}\"\n", + " del 
data\n", + " print('Falling through to the upsample step later in the notebook.')\n", + "elif sample_hps.mode == 'primed':\n", + " assert sample_hps.audio_file is not None\n", + " audio_files = sample_hps.audio_file.split(',')\n", + " duration = (int(sample_hps.prompt_length_in_seconds*hps.sr)//top_prior.raw_to_tokens)*top_prior.raw_to_tokens\n", + " x = load_prompts(audio_files, duration, hps)\n", + " zs = top_prior.encode(x, start_level=0, end_level=len(priors), bs_chunks=x.shape[0])\n", + " zs = _sample(zs, labels, sampling_kwargs, [None, None, top_prior], [2], hps)\n", + "else:\n", + " raise ValueError(f'Unknown sample mode {sample_hps.mode}.')" ], "execution_count": 0, "outputs": [] @@ -316,7 +536,7 @@ "colab": {} }, "source": [ - "zs = upsample(zs, labels, sampling_kwargs, [*upsamplers, top_prior], hps)\n" + "zs = upsample(zs, labels, sampling_kwargs, [*upsamplers, top_prior], hps)" ], "execution_count": 0, "outputs": [] @@ -392,7 +612,9 @@ "hps = Hyperparams()\n", "hps.sr = 44100\n", "hps.n_samples = 3 if model=='5b_lyrics' else 16\n", - "hps.name = 'co_composer'\n", + "# Specifies the directory to save the sample in.\n", + "# We set this to the Google Drive mount point.\n", + "hps.name = '/content/gdrive/My Drive/co_composer'\n", "hps.sample_length = 1048576 if model==\"5b_lyrics\" else 786432 \n", "chunk_size = 16 if model==\"5b_lyrics\" else 32\n", "max_batch_size = 3 if model==\"5b_lyrics\" else 16\n", From e3cd58620917310b930d1b224d4c23fa485fb17b Mon Sep 17 00:00:00 2001 From: Anthony Lexander Matos Date: Thu, 21 May 2020 16:00:50 -0400 Subject: [PATCH 2/4] Add support for prompted sampling with co-composition in notebook --- jukebox/Interacting_with_Jukebox.ipynb | 94 ++++++++++++++++++++++++-- 1 file changed, 87 insertions(+), 7 deletions(-) diff --git a/jukebox/Interacting_with_Jukebox.ipynb b/jukebox/Interacting_with_Jukebox.ipynb index 540990acbd..9deeb33431 100644 --- a/jukebox/Interacting_with_Jukebox.ipynb +++ b/jukebox/Interacting_with_Jukebox.ipynb @@ -628,6 +628,59 @@ "execution_count": 0, "outputs": [] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Select mode\n", + "Run one of these cells to select the desired mode." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": {}, + "outputs": [], + "source": [ + "# The default mode of operation.\n", + "# Creates songs based on artist and genre conditioning.\n", + "mode = 'ancestral'\n", + "codes_file=None\n", + "audio_file=None\n", + "prompt_length_in_seconds=None" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": {}, + "outputs": [], + "source": [ + "# Prime song creation using an arbitrary audio sample.\n", + "mode = 'primed'\n", + "codes_file=None\n", + "# Specify an audio file here.\n", + "audio_file = '/content/gdrive/My Drive/primer.wav'\n", + "# Specify how many seconds of audio to prime on.\n", + "prompt_length_in_seconds=12" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run the cell below regardless of which mode you chose." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": {}, + "outputs": [], + "source": [ + "sample_hps = Hyperparams(dict(mode=mode, codes_file=codes_file, audio_file=audio_file, prompt_length_in_seconds=prompt_length_in_seconds))" + ] + }, { "cell_type": "markdown", "metadata": { @@ -635,7 +688,22 @@ "colab_type": "text" }, "source": [ - "Choose your artist, genre, and lyrics here!" + "Specify your choice of artist, genre, lyrics, and length of musical sample. 
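\n",
+ "\n",
+ "As a hedged worked example of the rounding the next cell performs (raw_to_tokens = 128 is an assumed top-level hop size used only for illustration; the cell reads the real value from top_prior):\n",
+ "\n",
+ "    sr = 44100\n",
+ "    raw_to_tokens = 128                     # assumed for illustration\n",
+ "    tokens = int(71 * sr) // raw_to_tokens  # 24461 top-level tokens\n",
+ "    print(tokens * raw_to_tokens)           # 3131008 samples, ~71.0 s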
" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": {}, + "outputs": [], + "source": [ + "sample_length_in_seconds = 71 # Full length of musical sample to generate - we find songs in the 1 to 4 minute\n", + " # range work well, with generation time proportional to sample length. \n", + " # This total length affects how quickly the model \n", + " # progresses through lyrics (model also generates differently\n", + " # depending on if it thinks it's in the beginning, middle, or end of sample)\n", + "hps.sample_length = (int(sample_length_in_seconds*hps.sr)//top_prior.raw_to_tokens)*top_prior.raw_to_tokens\n", + "assert hps.sample_length >= top_prior.n_ctx*top_prior.raw_to_tokens, f'Please choose a larger sampling rate'" ] }, { @@ -646,10 +714,9 @@ "colab": {} }, "source": [ - "total_sample_length_in_seconds = 120\n", "metas = [dict(artist = \"Zac Brown Band\",\n", " genre = \"Country\",\n", - " total_length = total_sample_length_in_seconds * hps.sr,\n", + " total_length = hps.sample_length,\n", " offset = 0,\n", " lyrics = \"\"\"I met a traveller from an antique land,\n", " Who said—“Two vast and trunkless legs of stone\n", @@ -735,8 +802,13 @@ }, "source": [ "sampling_temperature = .98\n", - "sampling_kwargs = dict(temp=sampling_temperature, fp16=True,\n", - " max_batch_size=max_batch_size, chunk_size=chunk_size)" + "\n", + "lower_batch_size = 16\n", + "max_batch_size = 3 if model == \"5b_lyrics\" else 16\n", + "lower_level_chunk_size = 32\n", + "chunk_size = 16 if model == \"5b_lyrics\" else 32\n", + "sampling_kwargs = dict(temp=sampling_temperature, fp16=True, max_batch_size=lower_batch_size,\n", + " chunk_size=lower_level_chunk_size)" ], "execution_count": 0, "outputs": [] @@ -749,8 +821,16 @@ "colab": {} }, "source": [ - "zs=[t.zeros(hps.n_samples,0,dtype=t.long, device='cuda') for _ in range(3)]\n", - "zs=sample_partial_window(zs, labels, sampling_kwargs, 2, top_prior, tokens_to_sample, hps)\n", + "if sample_hps.mode == 'ancestral':\n", + " zs=[t.zeros(hps.n_samples,0,dtype=t.long, device='cuda') for _ in range(3)]\n", + " zs=sample_partial_window(zs, labels, sampling_kwargs, 2, top_prior, tokens_to_sample, hps)\n", + "elif sample_hps.mode == 'primed':\n", + " assert sample_hps.audio_file is not None\n", + " audio_files = sample_hps.audio_file.split(',')\n", + " duration = (int(sample_hps.prompt_length_in_seconds*hps.sr)//top_prior.raw_to_tokens)*top_prior.raw_to_tokens\n", + " x = load_prompts(audio_files, duration, hps)\n", + " zs = top_prior.encode(x, start_level=0, end_level=len(priors), bs_chunks=x.shape[0])\n", + " zs = sample_partial_window(zs, labels, sampling_kwargs, 2, top_prior, tokens_to_sample, hps)\n", "x = vqvae.decode(zs[2:], start_level=2).cpu().numpy()" ], "execution_count": 0, From 0c203a9d388c428aff814d533668361489f974f4 Mon Sep 17 00:00:00 2001 From: SMarioMan Date: Tue, 27 Oct 2020 18:21:49 -0400 Subject: [PATCH 3/4] Make using 5b more user friendly (#2) Updated comments to make it more clear '5b' is a valid model. The notebook also sets some sane defaults if '5b_lyrics' is the selected model. Updated the notebook so these defaults will also be used for '5b' which prevents out of memory exceptions from occurring on colab. 
Co-authored-by: mtferry <39018371+mtferry@users.noreply.github.com>
---
 jukebox/Interacting_with_Jukebox.ipynb | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/jukebox/Interacting_with_Jukebox.ipynb b/jukebox/Interacting_with_Jukebox.ipynb
index 9deeb33431..f5ae33d9d1 100644
--- a/jukebox/Interacting_with_Jukebox.ipynb
+++ b/jukebox/Interacting_with_Jukebox.ipynb
@@ -135,15 +135,15 @@
 "colab": {}
 },
 "source": [
- "model = \"5b_lyrics\" # or \"1b_lyrics\"\n",
+ "model = '5b_lyrics' # or '5b' or '1b_lyrics'\n",
 "hps = Hyperparams()\n",
 "hps.sr = 44100\n",
- "hps.n_samples = 3 if model=='5b_lyrics' else 8\n",
+ "hps.n_samples = 3 if model in ('5b', '5b_lyrics') else 8\n",
 "# Specifies the directory to save the sample in.\n",
 "# We set this to the Google Drive mount point.\n",
 "hps.name = '/content/gdrive/My Drive/samples'\n",
- "chunk_size = 16 if model==\"5b_lyrics\" else 32\n",
- "max_batch_size = 3 if model==\"5b_lyrics\" else 16\n",
+ "chunk_size = 16 if model in ('5b', '5b_lyrics') else 32\n",
+ "max_batch_size = 3 if model in ('5b', '5b_lyrics') else 16\n",
 "hps.levels = 3\n",
 "hps.hop_fraction = [.5,.5,.125]\n",
 "\n",
@@ -405,9 +405,9 @@
 "sampling_temperature = .98\n",
 "\n",
 "lower_batch_size = 16\n",
- "max_batch_size = 3 if model == \"5b_lyrics\" else 16\n",
+ "max_batch_size = 3 if model in ('5b', '5b_lyrics') else 16\n",
 "lower_level_chunk_size = 32\n",
- "chunk_size = 16 if model == \"5b_lyrics\" else 32\n",
+ "chunk_size = 16 if model in ('5b', '5b_lyrics') else 32\n",
 "sampling_kwargs = [dict(temp=.99, fp16=True, max_batch_size=lower_batch_size,\n",
 " chunk_size=lower_level_chunk_size),\n",
 " dict(temp=0.99, fp16=True, max_batch_size=lower_batch_size,\n",
@@ -611,13 +611,13 @@
 "model = \"5b_lyrics\" # or \"1b_lyrics\"\n",
 "hps = Hyperparams()\n",
 "hps.sr = 44100\n",
- "hps.n_samples = 3 if model=='5b_lyrics' else 16\n",
+ "hps.n_samples = 3 if model in ('5b', '5b_lyrics') else 16\n",
 "# Specifies the directory to save the sample in.\n",
 "# We set this to the Google Drive mount point.\n",
 "hps.name = '/content/gdrive/My Drive/co_composer'\n",
- "hps.sample_length = 1048576 if model==\"5b_lyrics\" else 786432 \n",
- "chunk_size = 16 if model==\"5b_lyrics\" else 32\n",
- "max_batch_size = 3 if model==\"5b_lyrics\" else 16\n",
+ "hps.sample_length = 1048576 if model in ('5b', '5b_lyrics') else 786432 \n",
+ "chunk_size = 16 if model in ('5b', '5b_lyrics') else 32\n",
+ "max_batch_size = 3 if model in ('5b', '5b_lyrics') else 16\n",
 "hps.hop_fraction = [.5, .5, .125] \n",
 "hps.levels = 3\n",
 "\n",
@@ -804,9 +804,9 @@
 "sampling_temperature = .98\n",
 "\n",
 "lower_batch_size = 16\n",
- "max_batch_size = 3 if model == \"5b_lyrics\" else 16\n",
+ "max_batch_size = 3 if model in ('5b', '5b_lyrics') else 16\n",
 "lower_level_chunk_size = 32\n",
- "chunk_size = 16 if model == \"5b_lyrics\" else 32\n",
+ "chunk_size = 16 if model in ('5b', '5b_lyrics') else 32\n",
 "sampling_kwargs = dict(temp=sampling_temperature, fp16=True, max_batch_size=lower_batch_size,\n",
 " chunk_size=lower_level_chunk_size)"
 ],
@@ -1260,4 +1260,4 @@
 "outputs": []
 }
 ]
-} \ No newline at end of file
+}

From 8876cfe590d59c39350f616a3bc9174510f1abe0 Mon Sep 17 00:00:00 2001
From: Kenneth Lamar
Date: Fri, 20 Nov 2020 10:26:29 -0500
Subject: [PATCH 4/4] Change default sample length

Provide a warning about how sample length affects runtime
---
 jukebox/Interacting_with_Jukebox.ipynb | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git
a/jukebox/Interacting_with_Jukebox.ipynb b/jukebox/Interacting_with_Jukebox.ipynb
index f5ae33d9d1..0c06a46ae5 100644
--- a/jukebox/Interacting_with_Jukebox.ipynb
+++ b/jukebox/Interacting_with_Jukebox.ipynb
@@ -266,7 +266,9 @@
 "colab_type": "text"
 },
 "source": [
- "Specify your choice of artist, genre, lyrics, and length of musical sample. "
+ "Specify your choice of artist, genre, lyrics, and length of musical sample. \n",
+ "\n",
+ "IMPORTANT: The sample length is the main factor in how long your sample takes to generate; shorter samples take less time. You are limited to 12 hours on the Google Colab free tier. A 50-second sample should be short enough to fully generate within 12 hours of processing. "
 ]
 },
 {
@@ -277,7 +279,7 @@
 "colab": {}
 },
 "source": [
- "sample_length_in_seconds = 71 # Full length of musical sample to generate - we find songs in the 1 to 4 minute\n",
+ "sample_length_in_seconds = 50 # Full length of musical sample to generate - we find songs in the 1 to 4 minute\n",
 " # range work well, with generation time proportional to sample length. \n",
 " # This total length affects how quickly the model \n",
 " # progresses through lyrics (model also generates differently\n",
 " # depending on if it thinks it's in the beginning, middle, or end of sample)\n",
 "hps.sample_length = (int(sample_length_in_seconds*hps.sr)//top_prior.raw_to_tokens)*top_prior.raw_to_tokens\n",
 "assert hps.sample_length >= top_prior.n_ctx*top_prior.raw_to_tokens, f'Please choose a larger sampling rate'"
 ],
@@ -1260,4 +1262,4 @@
 "outputs": []
 }
 ]
-}
+}
\ No newline at end of file
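
A hedged closing note on the numbers behind the new warning: the notebook's existing
assert requires at least top_prior.n_ctx * top_prior.raw_to_tokens samples. Assuming
n_ctx = 8192 and raw_to_tokens = 128 for the top-level prior (assumed values; the
notebook reads both from top_prior at runtime), the shortest sample the assert allows
is roughly:

    sr = 44100
    n_ctx, raw_to_tokens = 8192, 128   # assumed top-level values
    print(n_ctx * raw_to_tokens / sr)  # ~23.8 s minimum
    # so the new 50 s default clears the assert comfortably while
    # staying within the 12-hour free-tier budget described above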