Skip to content

Commit ec16a4e

Browse files
akoumpa and ashors1
authored and committed
fix operator precedence (#9403)
Signed-off-by: Alexandros Koumparoulis <akoumparouli@nvidia.com> Signed-off-by: ashors1 <ashors@nvidia.com>
1 parent 64ba014 commit ec16a4e

File tree

1 file changed

+2
-2
lines changed
  • nemo/collections/llm/gpt/model

1 file changed

+2
-2
lines changed

nemo/collections/llm/gpt/model/base.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -176,7 +176,7 @@ def gpt_forward_step(model, batch) -> torch.Tensor:
176176
def get_batch_on_this_context_parallel_rank(batch):
177177
from megatron.core import parallel_state
178178

179-
if cp_size := parallel_state.get_context_parallel_world_size() > 1:
179+
if (cp_size := parallel_state.get_context_parallel_world_size()) > 1:
180180
num_valid_tokens_in_ub = None
181181
if 'loss_mask' in batch and batch['loss_mask'] is not None:
182182
num_valid_tokens_in_ub = batch['loss_mask'].sum()
@@ -206,7 +206,7 @@ def get_packed_seq_params(batch):
206206

207207
cu_seqlens = batch['cu_seqlens'].squeeze() # remove batch size dimension (mbs=1)
208208
# remove -1 "paddings" added in collate_fn
209-
if cu_seqlens_argmin := batch.get('cu_seqlens_argmin', None) is not None:
209+
if (cu_seqlens_argmin := batch.get('cu_seqlens_argmin', None)) is not None:
210210
# pre-compute cu_seqlens_argmin in dataset class for perf
211211
cu_seqlens = cu_seqlens[: cu_seqlens_argmin.item()]
212212
else:

0 commit comments

Comments
 (0)