Stitch any CI into Tangled
107
fork

Configure Feed

Select the types of activity you want to include in your feed.

don't need all the info logs, this is all debug

authored by

Dylan Shepard and committed by
Tangled
e7055aa9 989b8f3f

+38 -38
+38 -38
provider_tekton.go
··· 301 301 "pipeline_run", ref.PipelineRunName, 302 302 ) 303 303 304 - logger.Info("watchPipelineRun: starting") 304 + logger.Debug("watchPipelineRun: starting") 305 305 306 306 last := "" 307 307 if obj, err := p.dyn.Resource(pipelineRunsGVR).Namespace(ref.Namespace). 308 308 Get(ctx, ref.PipelineRunName, metav1.GetOptions{}); err == nil { 309 309 status, terminal, ok := mapTektonPipelineRunStatus(obj) 310 - logger.Info("watchPipelineRun: initial status read", 310 + logger.Debug("watchPipelineRun: initial status read", 311 311 "status", status, "terminal", terminal, "ok", ok, 312 312 ) 313 313 if ok { ··· 317 317 logger.Error("publish tekton status", "err", err, "status", status) 318 318 } 319 319 if terminal { 320 - logger.Info("watchPipelineRun: already terminal on initial read; exiting", "status", status) 320 + logger.Debug("watchPipelineRun: already terminal on initial read; exiting", "status", status) 321 321 return 322 322 } 323 323 } ··· 335 335 ).String(), 336 336 }) 337 337 if err != nil { 338 - logger.Info("watchPipelineRun: watch failed; falling back to polling", "err", err) 338 + logger.Debug("watchPipelineRun: watch failed; falling back to polling", "err", err) 339 339 p.pollPipelineRun(ctx, ref, logger, last) 340 340 return 341 341 } 342 342 defer w.Stop() 343 343 344 - logger.Info("watchPipelineRun: watch established; entering event loop") 344 + logger.Debug("watchPipelineRun: watch established; entering event loop") 345 345 for { 346 346 select { 347 347 case <-ctx.Done(): 348 - logger.Info("watchPipelineRun: context cancelled") 348 + logger.Debug("watchPipelineRun: context cancelled") 349 349 return 350 350 case ev, ok := <-w.ResultChan(): 351 351 if !ok { 352 - logger.Info("watchPipelineRun: watch channel closed; falling back to polling") 352 + logger.Debug("watchPipelineRun: watch channel closed; falling back to polling") 353 353 p.pollPipelineRun(ctx, ref, logger, last) 354 354 return 355 355 } ··· 359 359 continue 360 360 } 361 361 status, 
terminal, ok := mapTektonPipelineRunStatus(obj) 362 - logger.Info("watchPipelineRun: watch event", 362 + logger.Debug("watchPipelineRun: watch event", 363 363 "event_type", ev.Type, 364 364 "status", status, "terminal", terminal, "ok", ok, "last", last, 365 365 ) 366 366 if !ok || status == last { 367 367 if terminal { 368 - logger.Info("watchPipelineRun: terminal status unchanged; exiting", "status", status) 368 + logger.Debug("watchPipelineRun: terminal status unchanged; exiting", "status", status) 369 369 return 370 370 } 371 371 continue ··· 376 376 logger.Error("publish tekton status", "err", err, "status", status) 377 377 continue 378 378 } 379 - logger.Info("watchPipelineRun: published status", "status", status, "terminal", terminal) 379 + logger.Debug("watchPipelineRun: published status", "status", status, "terminal", terminal) 380 380 if terminal { 381 - logger.Info("watchPipelineRun: terminal status reached; exiting", "status", status) 381 + logger.Debug("watchPipelineRun: terminal status reached; exiting", "status", status) 382 382 return 383 383 } 384 384 } ··· 391 391 logger *slog.Logger, 392 392 last string, 393 393 ) { 394 - logger.Info("pollPipelineRun: starting poll loop", "interval", "5s") 394 + logger.Debug("pollPipelineRun: starting poll loop", "interval", "5s") 395 395 ticker := time.NewTicker(5 * time.Second) 396 396 defer ticker.Stop() 397 397 for { 398 398 select { 399 399 case <-ctx.Done(): 400 - logger.Info("pollPipelineRun: context cancelled") 400 + logger.Debug("pollPipelineRun: context cancelled") 401 401 return 402 402 case <-ticker.C: 403 403 obj, err := p.dyn.Resource(pipelineRunsGVR).Namespace(ref.Namespace). 
··· 411 411 continue 412 412 } 413 413 status, terminal, ok := mapTektonPipelineRunStatus(obj) 414 - logger.Info("pollPipelineRun: poll tick", 414 + logger.Debug("pollPipelineRun: poll tick", 415 415 "status", status, "terminal", terminal, "ok", ok, "last", last, 416 416 ) 417 417 if !ok || status == last { 418 418 if terminal { 419 - logger.Info("pollPipelineRun: terminal status unchanged; exiting", "status", status) 419 + logger.Debug("pollPipelineRun: terminal status unchanged; exiting", "status", status) 420 420 return 421 421 } 422 422 continue ··· 427 427 logger.Error("publish tekton status", "err", err, "status", status) 428 428 continue 429 429 } 430 - logger.Info("pollPipelineRun: published status", "status", status, "terminal", terminal) 430 + logger.Debug("pollPipelineRun: published status", "status", status, "terminal", terminal) 431 431 if terminal { 432 - logger.Info("pollPipelineRun: terminal status reached; exiting", "status", status) 432 + logger.Debug("pollPipelineRun: terminal status reached; exiting", "status", status) 433 433 return 434 434 } 435 435 } ··· 452 452 condStatus, _ := cond["status"].(string) 453 453 reason, _ := cond["reason"].(string) 454 454 message, _ := cond["message"].(string) 455 - slog.Info("mapTektonPipelineRunStatus: condition", 455 + slog.Debug("mapTektonPipelineRunStatus: condition", 456 456 "pipeline_run", obj.GetName(), 457 457 "type", condType, 458 458 "status", condStatus, ··· 502 502 if err != nil { 503 503 return nil, err 504 504 } 505 - p.log.Info("Logs: found TaskRuns for PipelineRun", 505 + p.log.Debug("Logs: found TaskRuns for PipelineRun", 506 506 "pipeline_run", ref.PipelineRunName, "count", len(taskRuns), 507 507 ) 508 508 if len(taskRuns) == 0 { ··· 510 510 } 511 511 512 512 terminal := p.isPipelineRunTerminal(ctx, *ref) 513 - p.log.Info("Logs: pipeline run terminal state", "pipeline_run", ref.PipelineRunName, "terminal", terminal) 513 + p.log.Debug("Logs: pipeline run terminal state", "pipeline_run", 
ref.PipelineRunName, "terminal", terminal) 514 514 515 515 out := make(chan LogLine, 32) 516 516 go func() { ··· 521 521 if taskName == "" { 522 522 taskName = fmt.Sprintf("task %d", stepID) 523 523 } 524 - p.log.Info("Logs: streaming TaskRun", "task_run", taskName, "step_id", stepID, "terminal", terminal) 524 + p.log.Debug("Logs: streaming TaskRun", "task_run", taskName, "step_id", stepID, "terminal", terminal) 525 525 if !sendLine(ctx, out, LogLine{ 526 526 Kind: LogKindControl, 527 527 Time: time.Now(), ··· 547 547 }) { 548 548 return 549 549 } 550 - p.log.Info("Logs: finished TaskRun", "task_run", taskName, "step_id", stepID) 550 + p.log.Debug("Logs: finished TaskRun", "task_run", taskName, "step_id", stepID) 551 551 stepID++ 552 552 } 553 - p.log.Info("Logs: all TaskRuns streamed", "pipeline_run", ref.PipelineRunName) 553 + p.log.Debug("Logs: all TaskRuns streamed", "pipeline_run", ref.PipelineRunName) 554 554 }() 555 555 return out, nil 556 556 } ··· 560 560 obj, err := p.dyn.Resource(pipelineRunsGVR).Namespace(ref.Namespace). 
561 561 Get(ctx, ref.PipelineRunName, metav1.GetOptions{}) 562 562 if err != nil { 563 - p.log.Info("isPipelineRunTerminal: failed to get PipelineRun", "err", err, "pipeline_run", ref.PipelineRunName) 563 + p.log.Debug("isPipelineRunTerminal: failed to get PipelineRun", "err", err, "pipeline_run", ref.PipelineRunName) 564 564 return false 565 565 } 566 566 _, terminal, ok := mapTektonPipelineRunStatus(obj) 567 - p.log.Info("isPipelineRunTerminal: status check", "pipeline_run", ref.PipelineRunName, "terminal", terminal, "ok", ok) 567 + p.log.Debug("isPipelineRunTerminal: status check", "pipeline_run", ref.PipelineRunName, "terminal", terminal, "ok", ok) 568 568 return ok && terminal 569 569 } 570 570 ··· 582 582 trName := tr.GetName() 583 583 pods, err := p.podsForTaskRun(ctx, ref.Namespace, trName) 584 584 if err != nil { 585 - p.log.Info("fetchCompletedTaskRunLogs: list pods failed", "err", err, 585 + p.log.Debug("fetchCompletedTaskRunLogs: list pods failed", "err", err, 586 586 "task_run", trName, "pipeline_run", ref.PipelineRunName, 587 587 ) 588 588 return 589 589 } 590 - p.log.Info("fetchCompletedTaskRunLogs: found pods", 590 + p.log.Debug("fetchCompletedTaskRunLogs: found pods", 591 591 "task_run", trName, "pod_count", len(pods), 592 592 ) 593 593 ··· 608 608 if msg != "" { 609 609 line += " " + msg 610 610 } 611 - p.log.Info("fetchCompletedTaskRunLogs: step terminated", 611 + p.log.Debug("fetchCompletedTaskRunLogs: step terminated", 612 612 "task_run", trName, "step", stepName, 613 613 "exit_code", exitCode, "reason", reason, 614 614 ) ··· 625 625 626 626 for _, pod := range pods { 627 627 containers := append(pod.Spec.InitContainers, pod.Spec.Containers...) 
628 - p.log.Info("fetchCompletedTaskRunLogs: reading pod containers", 628 + p.log.Debug("fetchCompletedTaskRunLogs: reading pod containers", 629 629 "pod", pod.Name, "container_count", len(containers), 630 630 ) 631 631 for _, c := range containers { 632 - p.log.Info("fetchCompletedTaskRunLogs: reading container logs", 632 + p.log.Debug("fetchCompletedTaskRunLogs: reading container logs", 633 633 "pod", pod.Name, "container", c.Name, 634 634 ) 635 635 req := p.kube.CoreV1().Pods(ref.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ ··· 637 637 }) 638 638 rc, err := req.Stream(ctx) 639 639 if err != nil { 640 - p.log.Info("fetchCompletedTaskRunLogs: stream failed", "err", err, 640 + p.log.Debug("fetchCompletedTaskRunLogs: stream failed", "err", err, 641 641 "pod", pod.Name, "container", c.Name, 642 642 ) 643 643 continue 644 644 } 645 645 p.sendReaderLines(ctx, out, rc, stepID) 646 646 _ = rc.Close() 647 - p.log.Info("fetchCompletedTaskRunLogs: done reading container", 647 + p.log.Debug("fetchCompletedTaskRunLogs: done reading container", 648 648 "pod", pod.Name, "container", c.Name, 649 649 ) 650 650 } ··· 676 676 ) { 677 677 pods, err := p.podsForTaskRun(ctx, ref.Namespace, tr.GetName()) 678 678 if err != nil { 679 - p.log.Info("streamTaskRunLogs: list pods for TaskRun failed", "err", err, 679 + p.log.Debug("streamTaskRunLogs: list pods for TaskRun failed", "err", err, 680 680 "task_run", tr.GetName(), "pipeline_run", ref.PipelineRunName, 681 681 ) 682 682 return 683 683 } 684 - p.log.Info("streamTaskRunLogs: found pods", 684 + p.log.Debug("streamTaskRunLogs: found pods", 685 685 "task_run", tr.GetName(), "pod_count", len(pods), 686 686 ) 687 687 for _, pod := range pods { 688 688 containers := append(pod.Spec.InitContainers, pod.Spec.Containers...) 
689 - p.log.Info("streamTaskRunLogs: streaming pod containers", 689 + p.log.Debug("streamTaskRunLogs: streaming pod containers", 690 690 "pod", pod.Name, "container_count", len(containers), 691 691 ) 692 692 for _, c := range containers { 693 - p.log.Info("streamTaskRunLogs: streaming container", 693 + p.log.Debug("streamTaskRunLogs: streaming container", 694 694 "pod", pod.Name, "container", c.Name, "step_id", stepID, 695 695 ) 696 696 req := p.kube.CoreV1().Pods(ref.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{ ··· 698 698 }) 699 699 rc, err := req.Stream(ctx) 700 700 if err != nil { 701 - p.log.Info("streamTaskRunLogs: stream pod logs failed", "err", err, 701 + p.log.Debug("streamTaskRunLogs: stream pod logs failed", "err", err, 702 702 "pod", pod.Name, "container", c.Name, 703 703 ) 704 704 continue 705 705 } 706 706 p.sendReaderLines(ctx, out, rc, stepID) 707 707 _ = rc.Close() 708 - p.log.Info("streamTaskRunLogs: finished container", 708 + p.log.Debug("streamTaskRunLogs: finished container", 709 709 "pod", pod.Name, "container", c.Name, 710 710 ) 711 711 }