this repo has no description
2
fork

Configure Feed

Select the types of activity you want to include in your feed.

darwin + opencode

+210 -1017
+138 -11
flake.lock
··· 97 97 "llm-agents", 98 98 "nixpkgs" 99 99 ], 100 - "systems": "systems_5" 100 + "systems": [ 101 + "llm-agents", 102 + "systems" 103 + ] 101 104 }, 102 105 "locked": { 103 106 "lastModified": 1771437256, ··· 130 133 "type": "github" 131 134 } 132 135 }, 136 + "bun2nix": { 137 + "inputs": { 138 + "flake-parts": [ 139 + "llm-agents", 140 + "flake-parts" 141 + ], 142 + "import-tree": "import-tree", 143 + "nixpkgs": [ 144 + "llm-agents", 145 + "nixpkgs" 146 + ], 147 + "systems": [ 148 + "llm-agents", 149 + "systems" 150 + ], 151 + "treefmt-nix": [ 152 + "llm-agents", 153 + "treefmt-nix" 154 + ] 155 + }, 156 + "locked": { 157 + "lastModified": 1770895533, 158 + "narHash": "sha256-v3QaK9ugy9bN9RXDnjw0i2OifKmz2NnKM82agtqm/UY=", 159 + "owner": "nix-community", 160 + "repo": "bun2nix", 161 + "rev": "c843f477b15f51151f8c6bcc886954699440a6e1", 162 + "type": "github" 163 + }, 164 + "original": { 165 + "owner": "nix-community", 166 + "repo": "bun2nix", 167 + "type": "github" 168 + } 169 + }, 133 170 "darwin": { 134 171 "inputs": { 135 172 "nixpkgs": [ ··· 368 405 } 369 406 }, 370 407 "flake-parts_2": { 408 + "inputs": { 409 + "nixpkgs-lib": [ 410 + "llm-agents", 411 + "nixpkgs" 412 + ] 413 + }, 414 + "locked": { 415 + "lastModified": 1772408722, 416 + "narHash": "sha256-rHuJtdcOjK7rAHpHphUb1iCvgkU3GpfvicLMwwnfMT0=", 417 + "owner": "hercules-ci", 418 + "repo": "flake-parts", 419 + "rev": "f20dc5d9b8027381c474144ecabc9034d6a839a3", 420 + "type": "github" 421 + }, 422 + "original": { 423 + "owner": "hercules-ci", 424 + "repo": "flake-parts", 425 + "type": "github" 426 + } 427 + }, 428 + "flake-parts_3": { 371 429 "inputs": { 372 430 "nixpkgs-lib": [ 373 431 "nur", ··· 694 752 "original": { 695 753 "type": "tarball", 696 754 "url": "https://github.com/IBM/plex/releases/download/@ibm/plex-mono@1.1.0/ibm-plex-mono.zip" 755 + } 756 + }, 757 + "import-tree": { 758 + "locked": { 759 + "lastModified": 1763762820, 760 + "narHash": "sha256-ZvYKbFib3AEwiNMLsejb/CWs/OL/srFQ8AogkebEPF0=", 
761 + "owner": "vic", 762 + "repo": "import-tree", 763 + "rev": "3c23749d8013ec6daa1d7255057590e9ca726646", 764 + "type": "github" 765 + }, 766 + "original": { 767 + "owner": "vic", 768 + "repo": "import-tree", 769 + "type": "github" 697 770 } 698 771 }, 699 772 "indigo": { ··· 746 819 "type": "github" 747 820 } 748 821 }, 822 + "learning-goal": { 823 + "flake": false, 824 + "locked": { 825 + "lastModified": 1772311752, 826 + "narHash": "sha256-lvQdDRuC9H+8F4Fud753c6NogVdZtGqgtjKbiSVZCig=", 827 + "owner": "DrCatHicks", 828 + "repo": "learning-goal", 829 + "rev": "cc7e3a6c7f0917501f1fd422bad81ab6f5040050", 830 + "type": "github" 831 + }, 832 + "original": { 833 + "owner": "DrCatHicks", 834 + "repo": "learning-goal", 835 + "type": "github" 836 + } 837 + }, 838 + "learning-opportunities": { 839 + "flake": false, 840 + "locked": { 841 + "lastModified": 1773158564, 842 + "narHash": "sha256-xMpy9XxMaNCIAOr2dffrc5dyRt56jlam+XQjrNapsEw=", 843 + "owner": "DrCatHicks", 844 + "repo": "learning-opportunities", 845 + "rev": "e5f985d376461993253d285096ed0f4b4a095858", 846 + "type": "github" 847 + }, 848 + "original": { 849 + "owner": "DrCatHicks", 850 + "repo": "learning-opportunities", 851 + "type": "github" 852 + } 853 + }, 749 854 "llm-agents": { 750 855 "inputs": { 751 856 "blueprint": "blueprint", 857 + "bun2nix": "bun2nix", 858 + "flake-parts": "flake-parts_2", 752 859 "nixpkgs": "nixpkgs_3", 860 + "systems": "systems_5", 753 861 "treefmt-nix": "treefmt-nix_2" 754 862 }, 755 863 "locked": { 756 - "lastModified": 1772596195, 757 - "narHash": "sha256-RZHuv6e3rH3t8QsyHPue1L6w8I+d023/LdVmOezLWDg=", 864 + "lastModified": 1773068014, 865 + "narHash": "sha256-5WOIh+mgU1TLBVLOCgJDc3vSlx20saK+VYVZTl6MkSo=", 758 866 "owner": "numtide", 759 867 "repo": "llm-agents.nix", 760 - "rev": "92d68fc403787815a8f213b5bc2065d7010e1efe", 868 + "rev": "51efc59e9d492d10a2d85d57ba764898df71d0b9", 761 869 "type": "github" 762 870 }, 763 871 "original": { ··· 1033 1141 }, 1034 1142 "nixpkgs_3": { 
1035 1143 "locked": { 1036 - "lastModified": 1772554988, 1037 - "narHash": "sha256-8Kb+MSE6QYVX1S96aZOluOMVfvSEOs70vgX980qVUaY=", 1144 + "lastModified": 1772956932, 1145 + "narHash": "sha256-M0yS4AafhKxPPmOHGqIV0iKxgNO8bHDWdl1kOwGBwRY=", 1038 1146 "owner": "NixOS", 1039 1147 "repo": "nixpkgs", 1040 - "rev": "87f6b6e02cb3f87a1be4f939326c94c8af9d55d8", 1148 + "rev": "608d0cadfed240589a7eea422407a547ad626a14", 1041 1149 "type": "github" 1042 1150 }, 1043 1151 "original": { ··· 1097 1205 }, 1098 1206 "nur": { 1099 1207 "inputs": { 1100 - "flake-parts": "flake-parts_2", 1208 + "flake-parts": "flake-parts_3", 1101 1209 "nixpkgs": "nixpkgs_6" 1102 1210 }, 1103 1211 "locked": { ··· 1114 1222 "type": "github" 1115 1223 } 1116 1224 }, 1225 + "opencode-handoff": { 1226 + "flake": false, 1227 + "locked": { 1228 + "lastModified": 1772630975, 1229 + "narHash": "sha256-1wDwwz7gcKLeCr0kqwQtQi7UWf12AYiPDL8YT9iFO08=", 1230 + "owner": "Chickensoupwithrice", 1231 + "repo": "opencode-handoff", 1232 + "rev": "b18d546e567c8c15c7ce8377f82f1b81cd838890", 1233 + "type": "github" 1234 + }, 1235 + "original": { 1236 + "owner": "Chickensoupwithrice", 1237 + "repo": "opencode-handoff", 1238 + "type": "github" 1239 + } 1240 + }, 1117 1241 "poonam": { 1118 1242 "inputs": { 1119 1243 "nixpkgs": [ ··· 1180 1304 "homebrew-cask": "homebrew-cask", 1181 1305 "homebrew-core": "homebrew-core", 1182 1306 "jovian": "jovian", 1307 + "learning-goal": "learning-goal", 1308 + "learning-opportunities": "learning-opportunities", 1183 1309 "llm-agents": "llm-agents", 1184 1310 "nix-homebrew": "nix-homebrew", 1185 1311 "nix-matrix-appservices": "nix-matrix-appservices", 1186 1312 "nixos-hardware": "nixos-hardware", 1187 1313 "nixpkgs": "nixpkgs_5", 1188 1314 "nur": "nur", 1315 + "opencode-handoff": "opencode-handoff", 1189 1316 "poonam": "poonam", 1190 1317 "rust-overlay": "rust-overlay", 1191 1318 "tangled": "tangled", ··· 1448 1575 ] 1449 1576 }, 1450 1577 "locked": { 1451 - "lastModified": 1770228511, 1452 - 
"narHash": "sha256-wQ6NJSuFqAEmIg2VMnLdCnUc0b7vslUohqqGGD+Fyxk=", 1578 + "lastModified": 1772660329, 1579 + "narHash": "sha256-IjU1FxYqm+VDe5qIOxoW+pISBlGvVApRjiw/Y/ttJzY=", 1453 1580 "owner": "numtide", 1454 1581 "repo": "treefmt-nix", 1455 - "rev": "337a4fe074be1042a35086f15481d763b8ddc0e7", 1582 + "rev": "3710e0e1218041bbad640352a0440114b1e10428", 1456 1583 "type": "github" 1457 1584 }, 1458 1585 "original": {
+13
flake.nix
··· 77 77 78 78 llm-agents.url = "github:numtide/llm-agents.nix"; 79 79 80 + opencode-handoff = { 81 + url = "github:Chickensoupwithrice/opencode-handoff"; 82 + flake = false; 83 + }; 84 + learning-opportunities = { 85 + url = "github:DrCatHicks/learning-opportunities"; 86 + flake = false; 87 + }; 88 + learning-goal = { 89 + url = "github:DrCatHicks/learning-goal"; 90 + flake = false; 91 + }; 92 + 80 93 # Others 81 94 nur.url = "github:nix-community/NUR"; 82 95 rust-overlay = {
+27 -1
home/profiles/opencode/default.nix
··· 17 17 18 18 # github-mcp-server binary path from nixpkgs 19 19 githubMcpServer = "${pkgs.github-mcp-server}/bin/github-mcp-server"; 20 + 21 + # Plugin/skill sources fetched via flake inputs 22 + handoffSrc = inputs.opencode-handoff; 23 + learningOpportunitiesSrc = inputs.learning-opportunities; 24 + learningGoalSrc = inputs.learning-goal; 20 25 in 21 26 { 22 27 home.packages = [ ··· 65 70 "opencode/themes".source = ./themes; 66 71 "opencode/agents".source = ./agents; 67 72 "opencode/commands".source = ./commands; 68 - "opencode/skills".source = ./skills; 73 + 74 + # Local skills 75 + "opencode/skills/tmux".source = ./skills/tmux; 76 + "opencode/skills/session-search.disabled".source = ./skills/session-search.disabled; 77 + # Skills from upstream repos (via flake inputs) 78 + "opencode/skills/learning-opportunities".source = 79 + "${learningOpportunitiesSrc}/learning-opportunities/skills/learning-opportunities"; 80 + "opencode/skills/learning-goal".source = "${learningGoalSrc}/learning-goal/skills/learning-goal"; 81 + "opencode/skills/orient".source = "${learningOpportunitiesSrc}/orient/skills/orient"; 69 82 }; 70 83 71 84 home.file = lib.mkIf isBox { ··· 80 93 # module resolution can find dependencies if they're ever needed. 81 94 home.activation.opencode-plugins = lib.hm.dag.entryAfter [ "writeBoundary" ] '' 82 95 mkdir -p "$HOME/.config/opencode/plugins" 96 + mkdir -p "$HOME/.config/opencode/plugins/handoff" 97 + # Local plugins (top-level files are auto-loaded by OpenCode) 83 98 install -m644 ${./plugins/learning-opportunities-auto.js} \ 84 99 "$HOME/.config/opencode/plugins/learning-opportunities-auto.js" 100 + # opencode-handoff: copy entry point and patch imports to use subdir. 101 + # OpenCode globs plugins/*.{ts,js} so only handoff.ts is auto-loaded. 
102 + ${pkgs.gnused}/bin/sed \ 103 + -e 's|from "./tools"|from "./handoff/tools"|' \ 104 + -e 's|from "./files"|from "./handoff/files"|' \ 105 + ${handoffSrc}/src/plugin.ts > "$HOME/.config/opencode/plugins/handoff.ts" 106 + cp -f ${handoffSrc}/src/tools.ts "$HOME/.config/opencode/plugins/handoff/tools.ts" 107 + cp -f ${handoffSrc}/src/files.ts "$HOME/.config/opencode/plugins/handoff/files.ts" 108 + cp -f ${handoffSrc}/src/vendor.ts "$HOME/.config/opencode/plugins/handoff/vendor.ts" 109 + chmod 644 "$HOME/.config/opencode/plugins/handoff.ts" \ 110 + "$HOME/.config/opencode/plugins/handoff"/*.ts 85 111 ''; 86 112 }
+28 -8
home/profiles/opencode/plugins/learning-opportunities-auto.js
··· 1 1 // learning-opportunities-auto: OpenCode plugin 2 2 // 3 3 // Port of DrCatHicks/learning-opportunities' PostToolUse hook for OpenCode. 4 - // After a git commit, nudges the agent to consider offering a learning exercise. 4 + // After a git commit, injects a context message into the session that nudges 5 + // the agent to consider offering a learning exercise via the skill. 5 6 // 6 7 // Zero external imports -- avoids Bun module resolution issues when deployed 7 - // via Nix home-manager (where the file may be copied from the Nix store). 8 + // via Nix home-manager (where the file is copied from the Nix store). 8 9 // 9 10 // License: CC-BY-4.0 10 11 ··· 17 18 if (input.tool !== "bash") return; 18 19 19 20 // Check if the command was a git commit 20 - const command = output?.args?.command || input?.args?.command || ""; 21 + const command = input?.args?.command || ""; 21 22 if (!/git\s+(commit|cz)/.test(command)) return; 22 23 23 24 // Rate limit: max 2 offers per session ··· 25 26 offers++; 26 27 27 28 try { 28 - await client.tui.showToast({ 29 + // Inject a context message into the active session. The agent sees 30 + // this as a user message with noReply, so it becomes part of the 31 + // conversation context for the next assistant turn. 32 + await client.session.prompt({ 33 + path: { id: input.sessionID }, 29 34 body: { 30 - message: 31 - "You just committed code. Consider a learning exercise! Say: load the learning-opportunities skill", 32 - variant: "info", 35 + noReply: true, 36 + parts: [ 37 + { 38 + type: "text", 39 + text: [ 40 + "<system-reminder>", 41 + "[learning-opportunities-auto] The user just committed code.", 42 + "Per the learning-opportunities skill, consider whether this", 43 + "is a good moment to offer a learning exercise. If the committed", 44 + "work involved new files, schema changes, architectural decisions,", 45 + "refactors, or unfamiliar patterns, ask the user (one short sentence)", 46 + "if they'd like a 10-15 minute exercise. 
Do not start the exercise", 47 + "until they confirm. If they decline, note it -- no more offers", 48 + "this session.", 49 + "</system-reminder>", 50 + ].join(" "), 51 + }, 52 + ], 33 53 }, 34 54 }); 35 55 } catch { 36 - // Toast API may not be available in all contexts (e.g., CLI mode, server mode). 56 + // Session prompt API may not be available in all contexts. 37 57 // Silently ignore -- the skill can always be invoked manually. 38 58 } 39 59 },
-185
home/profiles/opencode/skills/learning-goal/SKILL.md
··· 1 - --- 2 - name: learning-goal 3 - description: Guide the learner through a structured goal-setting exercise grounded in research on Mental Contrasting with Implementation Intentions (MCII). The exercise helps developers set concrete learning goals, visualize meaningful outcomes, anticipate realistic obstacles, and build if-then plans to overcome them. 4 - license: CC-BY-4.0 5 - --- 6 - 7 - # Goal Setting with Mental Contrasting 8 - 9 - Guide the user through a structured goal-setting exercise grounded in research on the benefits of using SMART goals and Mental Contrasting with Implementation Intentions (MCII) to increase persistence and deeper behavioral commitment. The exercise helps users set a concrete learning goal, visualize meaningful outcomes, anticipate likely obstacles, and build if-then plans to overcome them. 10 - 11 - This exercise takes approximately 10-15 minutes. 12 - 13 - ## When to Offer 14 - 15 - Offer this exercise when a user: 16 - - Makes an explicit learning goal request, such as directly asking about goal setting, learning plans, or how to structure their skill development. 17 - - Project kickoff. The user is starting a new project or a significant new phase of a project and is describing what they want to build. 18 - 19 - Example triggers: "help me set a learning goal," "I want to get better at X," "can we do the goal-setting exercise," "I'm starting a new project and want to plan my learning", "How should I approach learning this?" 20 - 21 - ## When not to offer 22 - - Do not offer this exercise mid-task 23 - - User declined a goal-setting exercise offer this session 24 - 25 - Keep offers brief and non-repetitive. One short sentence is enough. 26 - 27 - ## How to Facilitate 28 - 29 - The effectiveness of this exercise depends on the user generating their own content. Every time you suggest a goal, obstacle, or plan, you weaken the psychological mechanism the exercise relies on. When in doubt, ask a question and stop. 
30 - You are a learning coach, not a lecturer. Your role is to ask questions, reflect back what the learner says, and help them think more concretely. Keep your responses short. Let the user do most of the talking. Do not rush through the steps. 31 - 32 - Pacing for interactive environments: Each step should be its own conversational turn. Do not combine steps. Ask one question, wait for the response, then move to the next step. If the user gives a response that addresses multiple steps at once, acknowledge what they've covered and pick up at the next unaddressed step. 33 - 34 - ### Core Principle: Pause for input 35 - 36 - End your message immediately after the question. Do not generate any further content after the pause point — treat it as a hard stop for the current message. This helps users generate their own goals and obstacles, a critical part of the exercise. 37 - 38 - After the pause point, do not generate: 39 - 40 - - Suggested or example responses 41 - - Hints disguised as encouragement ("Think about...", "Consider...") 42 - - Multiple questions in sequence 43 - - Italicized or parenthetical clues about the answer 44 - - Any teaching content 45 - 46 - Pause points follow this pattern: 47 - 48 - - Pose a specific question or task 49 - - Do not provide any prompt suggestions 50 - - Wait for the user's response (do not continue until they reply) 51 - - After their response, continue to the next step 52 - 53 - Example of what NOT to do: 54 - "What skill would you like to grow in? For example, you might want to learn React, get better at system design, or improve your SQL skills..." 55 - Example of what TO do: 56 - "What's a skill you'd like to grow in — something specific that connects to your work or a project you care about?" 57 - 58 - ### Step 1: Set a Learning Goal 59 - 60 - Ask the learner to name a specific skill or area they want to grow in. Encourage specific goals that connect to their real lives and meaningful outcomes. 
Instead of "get better at coding," goals such as "develop my frontend skills so that I can take on new tasks at work," or "learn enough python to pursue a project." 61 - 62 - If the goal is vague, help them narrow it by asking: 63 - - What would it look like to have this skill? 64 - - What could you do that you can't do now? 65 - - Is there a specific project or situation where this matters? 66 - 67 - Do not rewrite their goal for them. Help them sharpen it in their own words. 68 - 69 - ### Step 2: Strengthen the Goal by defining the SMART goal version 70 - 71 - Once the learner has stated their goal, guide them through reflecting on and refining it. Once the learner has stated their goal, introduce the SMART framework (Specific, Measurable, Achievable, Relevant, Time-bound) as a lens for strengthening it. 72 - 73 - Guide the learner in strengthening their goal using the SMART framework. Reference [PRINCIPLES.md](https://github.com/DrCatHicks/learning-goal/blob/main/learning-goal/skills/learning-goal/resources/PRINCIPLES.md) for detailed probing questions on each dimension. Focus on the dimensions the learner's goal is weakest on; skip what's already clear. 74 - 75 - Present the components briefly, then work through them conversationally as dimensions to think through. Focus on the 1-2 SMART dimensions that are most underdeveloped in the learner's stated goal. Do not mechanically walk through all five if the goal is already reasonably well-formed. Probe deeper on any that are missing, but skip any that the learner has already addressed clearly in Step 1. 76 - 77 - **Specific.** Can they define what success looks like in detailed terms? Ask: "How would you know you'd gotten there? What could you point to?" 78 - 79 - **Measurable.** Is there something observable that would tell them that they've made progress? This doesn't have to be a number. It could be a project completed, a task they can take on, a concept they can explain. 
Ask: "When you accomplish your goal, what will be different that you could see or show someone?" 80 - 81 - **Achievable.** Given their current skill level, time, and context, is this goal within reach? It should be challenging but possible. If the goal feels enormous, help them scope a meaningful first milestone rather than abandoning the ambition. Ask: "Does this feel like a stretch you can make? What would a realistic version of this look like?" 82 - 83 - **Relevant.** Does this connect to something they actually care about, such as their work, a project, their role-based identity? Goals that are "should" goals ("I should learn Kubernetes"") tend to lose to goals that are "want" goals ("I want to understand deployment well enough to stop being blocked by it""). Ask: "How does this connect to your larger priorities?" 84 - 85 - **Time-bound.** Goals should explicitly set a realistic timeframe, not to create pressure, but to make the goal more tangible and concrete. Ask: "What timeframe feels right for this? A week? A month? When would you want to check in with yourself on how it's going?" 86 - 87 - After working through these dimensions, offer the user the opportunity to restate their goal. It may have shifted, gotten more specific, or shrunk in scope. Reflect back what changed and why to help the learner see their own thinking process. 88 - 89 - ### Step 3: Visualize the Outcome 90 - 91 - Ask the learner to briefly describe why this goal matters to them and what it would feel like to achieve it. 92 - 93 - Prompt with: 94 - - Why do I want to achieve this learning goal? 95 - - What changes for you when you have this skill? 96 - - How does your day-to-day work feel different? 97 - - What becomes possible that isn't possible now? 98 - 99 - Keep this brief, limiting to one or two exchanges. The goal is to specifically think about why achieving the learning outcome leads to something personally meaningful. 
100 - 101 - ### Step 4: Identify Obstacles 102 - 103 - This is a critical step in forming a motivational plan. Ask the users to describe a situation where they could realistically face an obstacle in the way of pursuing the goal they just planned. Obstacles can be internal (habits, tendencies, emotions), as well as external constraints (time pressure, competing obligations). 104 - 105 - Ask them to be concrete and truly imagine a real situation: 106 - - When would this obstacle come up? What time of day, what situation? 107 - - What would it feel like in the moment? 108 - - What have you done in the past when this obstacle appeared? 109 - 110 - Help them identify 1-3 obstacles. Quality matters more than quantity. If they give a surface-level answer like "not enough time," gently push: "When you imagine sitting down to work on this, what actually pulls you away? What's the feeling or thought that comes up?" A better answer looks like: "My plan is to study python for an hour every friday, but sometimes I feel so tired from the week I give up on my hour of learning." 111 - 112 - Do not suggest obstacles. The learner must generate their own. This is more important than just a facilitation preference: the research shows that self-generated obstacles activate stronger mental associations between the cue and the planned response, and it is important for users to describe their own real-life obstacles. Suggested obstacles bypass this mechanism and reduce effectiveness. 113 - - After asking the learner to identify obstacles, stop. 114 - - Do not offer examples of common obstacles, hypothetical scenarios, or "things other learners have experienced." 115 - - Wait for their response. 116 - 117 - ### Step 5: Build If-Then Plans 118 - 119 - For each obstacle the user identified, ask them to write their own if-then plan in this format: 120 - 121 - *"If [obstacle/situation], then I will [specific action]."* 122 - 123 - Do not write the if-then plan for the learner. 
Ask them to draft it, then help them refine it. The learner must produce the first version. This is the same generation principle as Step 4: the exercise works because the learner is the one connecting their obstacle to their planned response. 124 - 125 - Ask: "Take the first obstacle you described. Can you turn it into an if-then plan? Start with 'If...' and describe the moment you'd recognize, then 'then I will...' and name one specific thing you'd do." 126 - 127 - Then stop. Wait for their response. 128 - 129 - After the learner drafts a plan, help them sharpen it by asking whether the cue is specific enough that they'd recognize it in the moment and whether the action is small enough to actually do. If either part is vague, ask a follow-up question — do not rewrite it for them. 130 - 131 - If the if-then plan is vague, help them narrow it by asking: 132 - - What is a small concrete action you could take to overcome this obstacle? 133 - - Is there a time when you've successfully overcome this obstacle before that you can use as an example? 134 - 135 - If the learner is genuinely stuck and cannot produce a first draft after prompting, you may share one example to illustrate the format, then ask them to try again with their own obstacle: 136 - "Here's what one might look like: 'If I open my laptop to study and feel the pull to check Slack first, then I will close Slack and open my project file before doing anything else.' Now try one with the obstacle you described." 137 - 138 - ### Step 6: Reaffirm or Adjust the Goal 139 - 140 - After working through obstacles and plans, ask the learner to revisit their original goal. Now that they've thought concretely about what could get in the way: 141 - 142 - - Does the goal still feel right? 143 - - Does it need to be bigger or smaller? 144 - - Is there a first step they want to commit to? 145 - 146 - This is also the moment to check feasibility. 
The research shows that mental contrasting is most effective when the learner believes the goal is achievable. If the obstacle work has revealed that the goal feels unrealistic, help them rescope without judgment. Adjusting a goal based on reflection is a sign of good self-regulation. 147 - 148 - ### Step 7: Produce a Goal Card 149 - 150 - At the end of the exercise, create a brief markdown file (e.g., learning-goal.md) summarizing: 151 - 152 - ``` 153 - ## My Learning Goal 154 - [Their goal in their words] 155 - 156 - ## Why It Matters 157 - [One or two sentences from Step 2] 158 - 159 - ## My If-Then Plans 160 - - If [obstacle 1], then I will [action 1]. 161 - - If [obstacle 2], then I will [action 2]. 162 - 163 - ## My First Step 164 - [What they committed to in Step 5] 165 - ``` 166 - 167 - Offer this as something they can keep, revisit, or pin somewhere visible. 168 - 169 - ## Tone and Approach 170 - 171 - - Be warm but not effusive. This is a coaching conversation, not a therapy session. 172 - - Do not praise goals as "great" or "amazing." Instead, reflect what you hear: "So the core of it is that you want to be able to make architectural decisions confidently, not just follow patterns you've seen." 173 - - Be direct about feedback: if they're giving contradictory answers, say so clearly, then explore why without judgment. 174 - - Do not use bullet points or numbered lists when talking to the learner. Speak in natural sentences. 175 - - If the learner gives short or disengaged answers, don't push. You can note that the exercise works best with concrete details, but respect their pace. 176 - - Offer escape hatches: "Want to keep going or pause here?" 177 - - Never start the exercise without the learner's confirmation. 178 - 179 - ## Adaptability 180 - 181 - This exercise is written for developer learning goals but the structure can be beneficial to any skill development context. 
If the learner's goal is outside software development writing, design, leadership, communication — follow the same steps. The psychological mechanism is the same. 182 - 183 - ## References 184 - 185 - This exercise is based on Mental Contrasting with Implementation Intentions (MCII), a self-regulation strategy developed by Oettingen and Gollwitzer, and adapted and successfully tested as an intervention by [Cat Hicks](https://www.drcathicks.com/) and [John Flournoy](http://johnflournoy.science/) in our work with software teams and across hundreds of people learning technical skills in their real workplaces. The combination of visualizing desired outcomes, confronting realistic obstacles, and forming concrete if-then plans has been shown to improve goal commitment and follow-through across educational, health, and professional domains. For full references, see Principles.md
-109
home/profiles/opencode/skills/learning-goal/resources/PRINCIPLES.md
··· 1 - # Goal-Setting Principles 2 - 3 - This document provides the scientific rationale for the goal-setting exercise in SKILL.md. Consult it when you need to probe deeper on a specific SMART dimension, when a user's goal needs strengthening, or when you need to explain why a step matters. 4 - 5 - --- 6 - 7 - ## SMART Goals 8 - 9 - SMART is a framework for strengthening goal quality. It is not a checklist to force every goal through, but a lens for identifying which dimensions of a goal are underdeveloped. Most users arrive with goals that are weak on one or two dimensions. Focus on improving those dimensions. 10 - 11 - ### Specific 12 - 13 - Vague goals ("get better at Python") don't direct behavior. Specific goals identify what the person will be able to do, in what context, and to what standard. A goal is specific enough when the learner could explain it to someone else and that person would know what success looked like. 14 - 15 - **Probing questions:** What would it look like to have this skill? What could you do that you can't do now? If you achieved this, what would someone see you doing differently? 16 - 17 - **Weak -> stronger:** "Learn React" -> "Build a dashboard component using React hooks so I can contribute to the frontend at work without needing someone to pair with me." 18 - 19 - ### Measurable 20 - 21 - Measurable doesn't have to mean numeric. It means there's something observable -- a project completed, a task performed independently, a concept explained clearly. The point is that the learner can recognize when they've made progress rather than relying on a vague sense of improvement. 22 - 23 - **Probing questions:** When you accomplish this, what will be different that you could see or show someone? How would you know you're halfway there? 24 - 25 - **Weak -> stronger:** "Understand deployment" -> "Deploy a service to production without asking for help on the pipeline configuration." 
26 - 27 - ### Achievable 28 - 29 - Goals should stretch but not break. If a goal is far beyond the learner's current skill, help them scope a meaningful first milestone rather than abandoning the ambition. The research on mental contrasting shows that goals perceived as achievable generate stronger commitment -- goals that feel impossible produce disengagement, not motivation. 30 - 31 - **Probing questions:** Does this feel like a stretch you can make? What would a realistic version look like given your current commitments? What's the smallest version of this goal that would still matter to you? 32 - 33 - ### Relevant 34 - 35 - Goals that connect to something the learner actually cares about -- their work, a project, their identity, their autonomy -- persist longer than goals driven by external "should." A learner who says "I should learn Kubernetes" is in a different motivational position than one who says "I want to understand deployment well enough to stop being blocked by it." Help learners find the want underneath the should. 36 - 37 - **Probing questions:** Why this goal and not something else? How does this connect to what you're working on or where you want to go? What would change for you if you had this skill? 38 - 39 - ### Time-bound 40 - 41 - A timeframe makes a goal concrete and creates a natural point for self-assessment. The purpose is not pressure -- it's tangibility. Open-ended goals ("someday I'll learn Rust") lack the urgency that supports follow-through. The timeframe should be realistic given the scope. 42 - 43 - **Probing questions:** What timeframe feels right -- a week, a month, a quarter? When would you want to check in with yourself on progress? Is there a natural deadline like a project start or a review cycle? 
44 - 45 - --- 46 - 47 - ## Mental Contrasting with Implementation Intentions (MCII) 48 - 49 - MCII is a self-regulation strategy that combines two techniques: mental contrasting (visualizing a desired outcome and then confronting the obstacles that stand in the way) and implementation intentions (forming specific if-then plans to handle those obstacles). The combination is more effective than either technique alone. 50 - 51 - ### Why mental contrasting works 52 - 53 - Positive visualization alone -- just imagining success -- can actually reduce goal pursuit. It satisfies the motivational need without producing action. Mental contrasting corrects this by pairing the desired future with an honest assessment of present reality and its obstacles. This contrast creates an association between the goal and the effort required, producing stronger commitment in cases where the goal is feasible. It induces people to reframe present reality when encountering an obstacle as an obstacle to their wish fulfillment (e.g., skipping studying to attend a party is no longer conceptualized as a fun activity but as an obstacle to getting a good grade in a course). 54 - 55 - Critically, mental contrasting is most effective when the person believes the goal is achievable. When feasibility is low, mental contrasting appropriately leads people to disengage -- which is healthy self-regulation, not failure. This is why the SMART goal work in Steps 1-2 of the exercise matters: it ensures the goal is well-formed before the learner invests in contrasting. 56 - 57 - ### Why implementation intentions work 58 - 59 - > Implementation intentions are if-then plans (review by Gollwitzer, 2014) in the following format: "If the critical situation X is encountered, then I will perform the goal-directed response Y!" These implementation intentions are to be differentiated from mere goal intentions. Mere goal intentions specify end states ("I want to achieve goal X!" or "I want to exert behavior X!"). 
In implementation intentions, on the other hand, the if-component of an implementation intention specifies a future critical event or point in time, and the then-component specifies how one will respond once these situational cues are encountered. (Gollwitzer et al., 2018) 60 - 61 - The if-then structure works because it makes it easier for people to make the link between a situational cue and a response. Instead of relying on in-the-moment motivation or willpower, the person has already decided what to do when the obstacle arises. The research shows that implementation intentions create strong cue-response associations that operate efficiently and can even work when cognitive resources are depleted. 62 - 63 - ### Why the combination matters 64 - 65 - Mental contrasting surfaces the obstacles and regulates goal commitments, revealing that action is necessary to overcome an obstacle if one wants to reach a desired future. Implementation intentions turn those obstacles into action triggers. Without the contrasting step, the if-then plans lack personally relevant cues. Without the if-then plans, the obstacles remain discouraging rather than actionable. The combination, a self-regulation psychological exercise named the MCII, has shown effects on goal attainment across educational, health, and professional domains, enhancing engagement, increasing commitment to positive behaviors, and reducing stress. 66 - 67 - ### What makes a good implementation intention 68 - 69 - The if-part should be a specific, recognizable cue -- a situation, a feeling, a moment the learner can picture. The then-part should be a concrete, immediate action that is small enough to actually perform when the cue occurs. Vague plans ("if I struggle, I'll try harder") don't create the automatic cue-response link. Specific plans do ("if I open my laptop to study and feel the urge to check Slack first, I'll close Slack and open my project file before I do anything else"). 
70 - 71 - ### Boundary conditions on SMART goals 72 - 73 - Goal-setting interventions have shown positive effects, including over brief periods (e.g., one week) and in digital and text-based interventions. The MCII exercise is a formal theory with a specified mechanism for why mental contrasting creates expectancy-dependent commitment and cue-response links. However, the SMART goal framework is a practitioner heuristic, not a formal theory. The empirical foundation underneath the SMART goal heuristic argues that specific, difficult goals outperform vague "do your best" goals. Progress monitoring, which is encouraged by the Measurable and Time-bound dimensions, also has robust meta-analytic support, with stronger effects when progress is physically recorded. 74 - 75 - While individual dimensions like progress monitoring have robust support, the SMART acronym as a package can oversimplify a more complex evidence base. Specificity in particular does not always help. A meta-analysis of goal-setting interventions for physical activity found no significant difference between specific and non-specific goals (McEwan et al., 2016), and Pietsch et al. (2024) found SMART goals were no more effective than do-your-best or open goals for creative performance. For complex or novel tasks where exploration and strategy development matter more than hitting a defined target, overly specific goals may actually constrain learning. 76 - 77 - This matters for this exercise because early learning goals can often be exploratory. A developer learning a new language or framework is not necessarily optimizing a known task when they are navigating uncertainty and testing new ideas. This is why the exercise treats SMART as a set of dimensions to consider rather than a checklist to complete, and the facilitator should focus on the 1-2 dimensions that are most underdeveloped in the learner's stated goal rather than mechanically walking through all five. 
In this Skill, SMART is used to deepen the initial goal but the majority of the exercise should center the MCII. 78 - 79 - --- 80 - 81 - ## Sources 82 - 83 - Duckworth, A. L., Grant, H., Loew, B., Oettingen, G., & Gollwitzer, P. M. (2011). Self-regulation strategies improve self-discipline in adolescents: Benefits of mental contrasting and implementation intentions. *Educational Psychology*, 31(1), 17-26. 84 - 85 - Gollwitzer, P. M. (1999). Implementation intentions: Strong effects of simple plans. *American Psychologist*, 54(7), 493. 86 - 87 - Gollwitzer, P. M. (2014). Weakness of the will: Is a quick fix possible? *Motivation and Emotion*, 38(3), 305-322. 88 - 89 - Gollwitzer, P. M., & Sheeran, P. (2006). Implementation intentions and goal achievement: A meta-analysis of effects and processes. Advances in experimental social psychology, 38, 69-119. 90 - 91 - Harkin, B., Webb, T. L., Chang, B. P., Prestwich, A., Conner, M., Kellar, I., ... & Sheeran, P. (2016). Does monitoring goal progress promote goal attainment? A meta-analysis of the experimental evidence. Psychological bulletin, 142(2), 198. 92 - 93 - Inzlicht, M., Werner, K. M., Briskin, J. L., & Roberts, B. W. (2021). Integrating models of self-regulation. *Annual Review of Psychology*, 72(1), 319-345. 94 - 95 - Kappes, A., Singmann, H., & Oettingen, G. (2012). Mental contrasting instigates goal pursuit by linking obstacles of reality with instrumental behavior. Journal of Experimental Social Psychology, 48(4), 811-818. 96 - 97 - Locke, E. A., & Latham, G. P. (2002). Building a practically useful theory of goal setting and task motivation: A 35-year odyssey. American psychologist, 57(9), 705. 98 - 99 - Locke, E. A., & Latham, G. P. (2019). The development of goal setting theory: A half century retrospective. Motivation Science, 5(2), 93-105 100 - 101 - McEwan, D., Harden, S. M., Zumbo, B. D., Sylvester, B. D., Kaulius, M., Ruissen, G. R., ... & Beauchamp, M. R. (2016). 
The effectiveness of multi-component goal setting interventions for changing physical activity behaviour: a systematic review and meta-analysis. Health psychology review, 10(1), 67-88. 102 - 103 - Oettingen, G. (2012). Future thought and behaviour change. *European Review of Social Psychology*, 23(1), 1-63. 104 - 105 - Oettingen, G., & Gollwitzer, P. M. (2010). Strategies of setting and implementing goals: Mental contrasting and implementation intentions. 106 - 107 - Pietsch, S., Riddell, H., Semmler, C., Ntoumanis, N., & Gucciardi, D. F. (2024). SMART goals are no more effective for creative performance than do-your-best goals or non-specific, exploratory 'open goals'. Educational Psychology, 44(9-10), 946-962. 108 - 109 - Wang, G., Wang, Y., & Gai, X. (2021). A meta-analysis of the effects of mental contrasting with implementation intentions on goal attainment. *Frontiers in Psychology*, 12, 565202.
-211
home/profiles/opencode/skills/learning-opportunities/SKILL.md
··· 1 - --- 2 - name: learning-opportunities 3 - description: Facilitates deliberate skill development during AI-assisted coding. Offers interactive learning exercises after architectural work (new files, schema changes, refactors). Use when completing features, making design decisions, or when user asks to understand code better. Supports the user's stated goal of understanding design choices as learning opportunities. 4 - argument-hint: "[orient]" 5 - license: CC-BY-4.0 6 - --- 7 - 8 - # Learning Opportunities 9 - 10 - > Invocation argument: $ARGUMENTS 11 - 12 - ## Purpose 13 - 14 - The user wants to build genuine expertise while using AI coding tools, not just ship code. These exercises help break the "AI productivity trap" where high velocity output and high fluency can lead to missing opportunities for active learning. 15 - 16 - When adapting these techniques or making judgment calls, consult [PRINCIPLES.md](https://github.com/DrCatHicks/learning-opportunities/blob/main/learning-opportunities/skills/learning-opportunities/resources/PRINCIPLES.md) for the underlying learning science. 17 - 18 - ## When to offer exercises 19 - 20 - Offer an optional 10-15 minute exercise after: 21 - - Creating new files or modules 22 - - Database schema changes 23 - - Architectural decisions or refactors 24 - - Implementing unfamiliar patterns 25 - - Any work where the user asked "why" questions during development 26 - 27 - **Always ask before starting**: "Would you like to do a quick learning exercise on [topic]? About 10-15 minutes." 28 - 29 - ## When not to offer 30 - 31 - - User declined an exercise offer this session 32 - - User has already completed 2 exercises this session 33 - 34 - Keep offers brief and non-repetitive. One short sentence is enough. 
35 - 36 - ## Scope 37 - 38 - This skill applies to: 39 - - Claude Code sessions (primary context) 40 - - Technical discussions in chat where code concepts are being explored 41 - - Any context where the user is learning through building 42 - 43 - ## Core principle: Pause for input 44 - 45 - **End your message immediately after the question.** Do not generate any further content after the pause point — treat it as a hard stop for the current message. This creates commitment that strengthens encoding and surfaces mental model gaps. 46 - 47 - After the pause point, do not generate: 48 - - Suggested or example responses 49 - - Hints disguised as encouragement ("Think about...", "Consider...") 50 - - Multiple questions in sequence 51 - - Italicized or parenthetical clues about the answer 52 - - Any teaching content 53 - 54 - Allowed after the question: 55 - - Content-free reassurance: "(Take your best guess—wrong predictions are useful data.)" 56 - - An escape hatch: "(Or we can skip this one.)" 57 - 58 - Pause points follow this pattern: 59 - 1. Pose a specific question or task 60 - 2. Wait for the user's response (do not continue until they reply), and do not provide any prompt suggestions 61 - 3. After their response, provide feedback that connects their thinking to the actual behavior 62 - 4. If their prediction was wrong, be clear about what's incorrect, then explore the gap—this is high-value learning data 63 - 5. Don't attribute to the user any insight they didn't actually express. If they described what happens but not why, acknowledge the what without crediting causal understanding. 64 - 65 - Use explicit markers: 66 - 67 - > **Your turn:** What do you think happens when [specific scenario]? 68 - > 69 - > (Take your best guess—wrong predictions are useful data.) 70 - 71 - Wait for their response before continuing. 72 - 73 - ## Exercise types 74 - 75 - ### Prediction → Observation → Reflection 76 - 77 - 1. 
**Pause:** "What do you predict will happen when [specific scenario]?" 78 - 2. Wait for response 79 - 3. Walk through actual behavior together 80 - 4. **Pause:** "What surprised you? What matched your expectations?" 81 - 82 - ### Generation → Comparison 83 - 84 - 1. **Pause:** "Before I show you how we handle [X], sketch out how you'd approach it" 85 - 2. Wait for response 86 - 3. Show the actual implementation 87 - 4. **Pause:** "What's similar? What's different, and why do you think we went this direction?" 88 - 89 - ### Trace the path 90 - 91 - 1. Set up a concrete scenario with specific values 92 - 2. **Pause at each decision point:** "The request hits the middleware now. What happens next?" 93 - 3. Wait before revealing each step 94 - 4. Continue through the full path 95 - 96 - 97 - ### Debug this 98 - 99 - 1. Present a plausible bug or edge case 100 - 2. **Pause:** "What would go wrong here, and why?" 101 - 3. Wait for response 102 - 4. **Pause:** "How would you fix it?" 103 - 5. Discuss their approach 104 - 105 - ### Teach it back 106 - 107 - 1. **Pause:** "Explain how [component] works as if I'm a new developer joining the project" 108 - 2. Wait for their explanation 109 - 3. Offer targeted feedback: what they nailed, what to refine 110 - 111 - ### Retrieval check-in (for returning sessions) 112 - 113 - At the start of a new session on an ongoing project: 114 - 115 - 1. **Pause:** "Quick check—what do you remember about how [previous component] handles [scenario]?" 116 - 2. Wait for response 117 - 3. Fill gaps or confirm, then proceed 118 - 119 - ## Techniques to weave in 120 - 121 - **Elaborative interrogation**: Ask "why," "how," and "when else" questions 122 - - "Why did we structure it this way rather than [alternative]?" 123 - - "How would this behave differently if [condition changed]?" 124 - - "In what context might [alternative] be a better choice?" 
125 - 126 - **Interleaving**: Mix concepts rather than drilling one 127 - - "Which of these three recent changes would be affected if we modified [X]?" 128 - 129 - **Varied practice contexts**: Apply the same concept in different scenarios 130 - - "We used this pattern for user auth—how would you apply it to API key validation?" 131 - 132 - **Concrete-to-abstract bridging**: After hands-on work, transfer to broader contexts 133 - - "This is an example of [pattern]. Where else might you use this approach?" 134 - - "What's the general principle here that you could apply to other projects?" 135 - 136 - **Error analysis**: Examine mistakes and edge cases deliberately 137 - - "Here's a bug someone might accidentally introduce—what would go wrong and why?" 138 - 139 - ## Hands-on code exploration 140 - 141 - **Prefer directing users to files over showing code snippets.** Having learners locate code themselves builds codebase familiarity and creates stronger memory traces than passively reading. 142 - 143 - ### Completion-style prompts 144 - 145 - Give enough context to orient, but have them find the key piece: 146 - 147 - > Open `[file]` and find the `[component]`. What does it do with `[variable]`? 148 - 149 - ### Fading scaffolding 150 - 151 - Adjust guidance based on demonstrated familiarity: 152 - 153 - - **Early:** "Open `[file]`, scroll to around line `[N]`, and find the `[function]`" 154 - - **Later:** "Find where we handle `[feature]`" 155 - - **Eventually:** "Where would you look to change how `[feature]` works?" 156 - 157 - Fading adjusts the difficulty of the *question setup*, not the *answer*. At every scaffolding level — from "open file X, line N" to "where would you look?" — the learner still generates the answer themselves. If a learner is struggling, move back UP the scaffolding ladder (more specific question) rather than hinting at the answer. 
158 - 159 - ### Pair finding with explaining 160 - 161 - After they locate code, prompt self-explanation: 162 - 163 - > You found it. Before I say anything—what do you think this line does? 164 - 165 - ### Example-problem pairs 166 - 167 - After exploring one instance, have them find a parallel: 168 - 169 - > We just looked at how `[function A]` handles `[task]`. Can you find another function that does something similar? 170 - 171 - ### When to show code directly 172 - 173 - - The snippet is very short (1-3 lines) and full context isn't needed 174 - - You're introducing new syntax they haven't encountered 175 - - The file is large and searching would be frustrating rather than educational 176 - - They're stuck and need to move forward 177 - 178 - ## Facilitation guidelines 179 - 180 - - **Ask if they want to engage** before starting any exercise 181 - - **Honor their response time**—don't rush or fill silence 182 - - **Adjust difficulty dynamically**: if they're nailing predictions, increase complexity; if they're struggling, narrow scope 183 - - **Embrace desirable difficulty**: exercises should require effort without being frustrating 184 - - **Offer escape hatches**: "Want to keep going or pause here?" 185 - - **Keep exercises to 10-15 minutes** unless they want to go deeper 186 - - **Be direct about errors**: When they're wrong, say so clearly, then explore why without judgment 187 - 188 - ## Orientation mode 189 - 190 - If this skill is invoked with the argument `orient` (i.e., `/learning-opportunities orient`), run a guided repo orientation exercise instead of the default exercise offer flow. 191 - 192 - ### Finding the orientation file 193 - 194 - Look for `resources/orientation.md` relative to this skill file at these locations, in order: 195 - 196 - 1. `.claude/skills/learning-opportunities/resources/orientation.md` (project level) 197 - 2. 
`~/.claude/skills/learning-opportunities/resources/orientation.md` (user level) 198 - 199 - If the file does not exist at either location, stop and tell the user: 200 - 201 - > "No orientation file found. Run `/orient:orient` first to generate one for this repo. It takes about 30 seconds." 202 - 203 - See [orient](https://github.com/mcmullarkey/orient) for the plugin that generates orientation files. 204 - 205 - ### Running the orientation exercise 206 - 207 - If `orientation.md` exists, read it and run through the **Suggested exercise sequence** section it contains. Apply all standard skill techniques: pause for input after each question, use fading scaffolding, embrace wrong predictions as learning data. The orientation file contains repo-specific content but not full pedagogical guidance — consult [PRINCIPLES.md](https://github.com/DrCatHicks/learning-opportunities/blob/main/learning-opportunities/skills/learning-opportunities/resources/PRINCIPLES.md) as needed when making facilitation decisions. 208 - 209 - Before starting, give the user a one-sentence summary of what the orientation covers and ask if they want to proceed — consistent with the "always ask before starting" principle. 210 - 211 - After the exercise sequence, ask the user: "What's one thing about this codebase that surprised you or that you want to dig into further?" Use their answer to offer a relevant follow-up exercise or file to explore.
-193
home/profiles/opencode/skills/learning-opportunities/resources/PRINCIPLES.md
··· 1 - # Learning Principles 2 - 3 - This document provides the scientific rationale for the techniques in SKILL.md. Consult it when adapting techniques or making judgment calls about learning approaches. 4 - 5 - ## Core insight: We're often wrong about what helps us learn 6 - 7 - Misconceptions about learning are common and predict long-term performance differences between efficient and inefficient learning approaches. Our minds can confuse the *experience of effort* with *actual learning*. Also, our minds can confuse the *experience of fluency* with *actual knowledge*. Strategies that learners assume are productive because they feel high effort often aren't, whereas study strategies that feel easy to learners can often work better than we expect. At the same time, productive struggle is more productive than we realize, and learners need to encounter mistakes and feedback to progress. 8 - 9 - Focusing on long-term learning outcomes, rather than short-term performance, helps learners. 10 - 11 - --- 12 - 13 - ## The Generation Effect & The Power of Testing 14 - 15 - **Finding:** Users encode information better when they produce it rather than passively consume it. Testing produces better delayed retention than passive consumption and passive review, even when immediate performance is worse. 16 - 17 - **Mechanism:** Active retrieval strengthens memory traces in ways that passive review does not. The mind learns more accurately and deeply with hands-on engagement. Testing promotes strong positive effects on long-term retention. 18 - 19 - **In practice:** Having learners generate predictions, explanations, or solutions—even wrong ones—produces better learning than showing them the answer first. 20 - 21 - **Risk in AI-assisted work:** Becoming a passive recipient of generated solutions. If a user only reads and accepts output, they skip the generative processing that builds understanding. 
22 - 23 - **Application:** Prediction exercises, generation-before-instruction, teach-it-back prompts, having users sketch solutions before revealing implementations. 24 - 25 - --- 26 - 27 - ## Pre-testing (Potentiating a Learning Session) 28 - 29 - **Finding:** Attempting to figure out an answer *before* learning new information produces stronger memory and deeper understanding for that new information—even when the pre-learning attempt was wrong. 30 - 31 - **Mechanism:** Pre-testing directs attention to knowledge gaps and primes the mind to encode incoming information more effectively. A failed attempt makes the correct answer more memorable by contrast. 32 - 33 - **Key research:** Giebl et al. found that novice programmers who attempted problems with incomplete information before searching performed better than those who searched immediately, even though the pretesting group was unable to solve the problem. 34 - 35 - **Application:** Ask "what do you predict will happen" before tracing code. Ask "how would you approach this" before showing implementations. Interweave testing questions with revealing new information and concepts. Wrong predictions are valuable data, not failures. 36 - 37 - --- 38 - 39 - ## The Spacing Effect 40 - 41 - **Finding:** Distributing learning over time produces better retention than massing it into a single session. 42 - 43 - **Mechanism:** Spaced retrieval requires the brain to reconstruct knowledge repeatedly, strengthening long-term memory. Massed practice (cramming) creates fluency with short-term effects that fade quickly. 44 - 45 - **Learner Misconception:** Spacing feels easier than cramming, so people rarely believe it works better. In studies, spacing produces better performance for the majority of participants, yet most believe massing had been more effective.
46 - 47 - **Risk in AI-assisted work:** Machine velocity and ease of AI generation can push a user into constant "cram"—completing work in singular large pushes rather than repeatedly returning to tasks with interruption. 48 - 49 - **Application:** Retrieval check-ins at the start of sessions. Return to the same learning area at multiple times during a project. Build small and targeted reflection moments into workflows. 50 - 51 - --- 52 - 53 - ## The Worked Example Effect 54 - 55 - **Finding:** Studying worked examples, complete solutions with some of the steps shown, produces better initial learning than problem-solving practice, particularly for novices. This effect can reverse for experts, where the shown steps are redundant information. 56 - 57 - **Mechanism:** Problem-solving imposes high cognitive load because learners must simultaneously search for a solution strategy and learn the underlying concept. Worked examples can reduce extraneous load during initial learning, freeing cognitive resources for schema construction. As expertise develops, this reverses as worked examples become too redundant and going straight into problem-solving becomes more effective (the "expertise reversal effect"). 58 - 59 - **Learner Misconception:** Learners often do not seek out enough examples, but learners who study worked examples outperform those who spend equivalent time problem-solving with no example exposure. 60 - 61 - **Risk in AI-assisted work:** AI-generated solutions can function like worked examples, which is beneficial for novices trying to build initial schemas. However, if learners never transition to generating solutions themselves, they miss the retrieval practice and generation effects that consolidate learning. The convenience of AI examples can keep learners perpetually in "novice mode." 62 - 63 - **Application:** Use the fading technique: start with complete examples, then progressively remove steps and ask the learner to fill in gaps. 
Prompt learners to explain *why* each step works, not just *what* happens. 64 - 65 - --- 66 - 67 - ## Desirable Difficulties 68 - 69 - **Finding:** Conditions that make learning slower or harder in the short term often produce better long-term retention and transfer. 70 - 71 - **Mechanism:** Effort during encoding creates stronger, more durable learning. Specific and manageable challenges also promote knowledge transfer. 72 - 73 - **Risk in AI-assisted work:** Users often optimize for short-term performance (feeling fluent, moving fast) at the expense of long-term capability, while underestimating the learning gain from productive struggle. 74 - 75 - **Implications:** 76 - - Exercises should require effort without being frustrating 77 - - Struggle during learning is often a sign it's working, not failing 78 - - Slowing down can produce more value over time than optimizing for throughput 79 - 80 - **Application:** Don't simplify exercises just because the learner struggles. Embrace productive difficulty. Scaffold when stuck, but don't eliminate the challenge. 81 - 82 - --- 83 - 84 - ## Illusions of Learning 85 - 86 - ### Fluency Illusion 87 - 88 - **Finding:** When information feels easy to process or easy to look up, we overestimate how well we've learned it. 89 - 90 - **Learner Misconception:** Smooth reading or easy recognition creates a sense of familiarity that we mistake for durable knowledge. 91 - 92 - **Risk in AI-assisted work:** Generated code that's quickly produced can make users feel the illusion that they understand it even when they don't. The fluency of the output masks gaps in your mental model. 93 - 94 - **Application:** Encourage user to self-navigate through new files and do hands-on testing of their understanding. Unpack mental models by asking about consequences of specific changes. 
95 - 96 - ### Effort Illusion 97 - 98 - **Finding:** Because of misconceptions about effort, users can also mistake the *feeling* of working hard for actual learning. 99 - 100 - **Learner Misconception:** Grinding through tasks creates a sense of productivity that may not correspond to skill development. High output can coexist with skill stagnation. 101 - 102 - **Risk in AI-assisted work:** Shipping lots of code can feel like growth even when you're not building transferable understanding. Users may not notice production fatigue and burnout that decreases their ability to perform verification and self-monitoring. 103 - 104 - **Application:** Use retrieval exercises to test actual understanding. Identify learning opportunities at major project turning points. 105 - 106 - --- 107 - 108 - ## Active vs. Passive Processing 109 - 110 - **Finding:** Active engagement (retrieving, explaining, generating) beats passive review (reading, watching, accepting). 111 - 112 - **Mechanism:** Passive exposure creates recognition and familiarity but is less efficient for long-term retention, whereas active processing builds retrieval and transfer. 113 - 114 - **Application:** 115 - - Asking "what do you think this does" beats explaining what it does 116 - - Having users locate code beats showing them code 117 - - Teach-it-back exercises test real understanding 118 - 119 - --- 120 - 121 - ## Dynamic Testing 122 - 123 - **Finding:** Errors during learning, when followed by corrective feedback, enhance retention compared to error-free learning. Allowing learners to adjust their answer with feedback provides a more accurate view of their learning. 124 - 125 - **Mechanism:** Errors create prediction violations that the brain encodes strongly. The surprise of being wrong can make the correct information more memorable and helps learners adjust their mental models. 126 - 127 - **Critical nuance:** This requires clear feedback.
Errors without correction, or with vague/softened feedback, don't produce the benefit. 128 - 129 - **Application:** When learners are wrong, be direct about what's incorrect, then explore why. Don't soften wrongness into ambiguity. 130 - 131 - --- 132 - 133 - ## Transfer and Interleaving 134 - 135 - **Finding:** Learning transfers better when explicitly connected to underlying principles. Learners build mental models and schema knowledge more efficiently when presented with concepts in varied contexts. 136 - 137 - **Mechanism:** Knowledge encoded with a single context tends to stay bound to that context. Explicit abstraction and varied examples build flexible knowledge. 138 - 139 - **Application:** After hands-on practice, prompt transfer: "This is an example of [pattern]. Where else might you use this?" Apply the same concept in different scenarios rather than drilling identical cases. 140 - 141 - --- 142 - 143 - ## Metacognition Awareness 144 - 145 - **Finding:** Learners who monitor and adjust their own learning strategies outperform those who don't, independent of raw ability. Experts learn to harness strategic metacognitive practices to transcend their original cognitive constraints while problem-solving. 146 - 147 - **Key capabilities:** 148 - - Monitoring: Knowing when you understand vs. when you don't 149 - - Control: Adjusting strategies based on that monitoring 150 - - Calibration: Accurately judging your own competence 151 - 152 - **Risk in AI-assisted work:** Constant production velocity can suppress metacognitive monitoring. Users who don't pause to ask "am I actually learning this?" may not develop metacognitive awareness. 153 - 154 - **Application:** Build reflection moments into workflows. Prompt self-assessment. Make space for learners to notice and explore their own mental models and gaps. 
155 - 156 - --- 157 - 158 - ## What this means for AI-assisted development 159 - 160 - The combination of these principles points to a specific risk profile for AI-assisted coding: 161 - 162 - 1. **Generation effect undermined:** Accepting generated code skips the active processing that builds understanding 163 - 2. **Fluency illusion amplified:** Clean generated code feels understood even when it isn't 164 - 3. **Spacing effect eliminated:** Machine velocity pushes toward constant cramming 165 - 4. **Metacognition suppressed:** Fast workflows don't leave room to monitor learning and develop schema representation 166 - 5. **Testing and retrieval underused** Fewer opportunities to benefit from testing 167 - 168 - The techniques in SKILL.md are designed to counteract these specific risks by reintroducing: 169 - - Active generation (predictions, explanations, sketches) 170 - - Retrieval practice (check-ins, teach-it-back, self-testing) 171 - - Deliberate pauses (spacing, reflection) 172 - - Explicit metacognition (self-assessment, gap identification) 173 - 174 - --- 175 - 176 - ## Sources 177 - 178 - - Bjork, R. A., Dunlosky, J., & Kornell, N. (2013). Self-regulated learning: Beliefs, techniques, and illusions. Annual review of psychology, 64(1), 417-444. 179 - - Dunlosky, J., Rawson, K. A., Marsh, E. J., Nathan, M. J., & Willingham, D. T. (2013). Improving students' learning with effective learning techniques: Promising directions from cognitive and educational psychology. Psychological Science in the Public interest, 14(1), 4-58. 180 - - Ericsson, K. A., Hoffman, R. R., & Kozbelt, A. (Eds.). (2018). The Cambridge handbook of expertise and expert performance. Cambridge University Press. 181 - - Giebl, S., Mena, S., Storm, B. C., Bjork, E. L., & Bjork, R. A. (2021). Answer first or Google first? Using the Internet in ways that enhance, not impair, one's subsequent retention of needed information. Psychology Learning & Teaching, 20(1), 58-75. 182 - - Hicks, C. 
M., Lee, C. S., & Foster-Marks, K. (2025, March 15). The New Developer: AI Skill Threat, Identity Change & Developer Thriving in the Transition to AI-Assisted Software Development. https://doi.org/10.31234/osf.io/2gej5_v2 183 - - Kalyuga, S. (2007). Expertise reversal effect and its implications for learner-tailored instruction. Educational psychology review, 19(4), 509-539. 184 - - Kang, S. H. (2016). Spaced repetition promotes efficient and effective learning: Policy implications for instruction. Policy Insights from the Behavioral and Brain Sciences, 3(1), 12-19. 185 - - Kornell, N. (2009). Optimising learning using flashcards: Spacing is more effective than cramming. Applied Cognitive Psychology: The Official Journal of the Society for Applied Research in Memory and Cognition, 23(9), 1297-1317. 186 - - Murphy, D. H., Little, J. L., & Bjork, E. L. (2023). The value of using tests in education as tools for learning—not just for assessment. Educational Psychology Review, 35(3), 89. 187 - - Roediger III, H. L., & Karpicke, J. D. (2006). The power of testing memory: Basic research and implications for educational practice. Perspectives on psychological science, 1(3), 181-210. 188 - - Rohrer, D., & Taylor, K. (2007). The shuffling of mathematics problems improves learning. Instructional Science, 35(6), 481-498. 189 - - Skulmowski, A., & Xu, K. M. (2022). Understanding cognitive load in digital and online learning: A new perspective on extraneous cognitive load. Educational psychology review, 34(1), 171-196. 190 - - Soderstrom, N. C., & Bjork, R. A. (2015). Learning versus performance: An integrative review. Perspectives on Psychological Science, 10(2), 176-199. 191 - - Sweller, J., & Cooper, G. A. (1985). The use of worked examples as a substitute for problem solving in learning algebra. Cognition and instruction, 2(1), 59-89. 192 - - Tankelevitch, L., Kewenig, V., Simkute, A., Scott, A. E., Sarkar, A., Sellen, A., & Rintel, S. (2024, May). 
The metacognitive demands and opportunities of generative AI. In Proceedings of the 2024 CHI Conference on Human Factors in Computing Systems (pp. 1-24). 193 - - Hicks, C. (2025). Cognitive helmets for the AI bicycle: Part 1. *Fight for the Human*. https://www.fightforthehuman.com/cognitive-helmets-for-the-ai-bicycle-part-1/
-256
home/profiles/opencode/skills/orient/SKILL.md
··· 1 - --- 2 - name: orient 3 - description: Generates a repo-specific orientation.md resource for the learning-opportunities skill. Only invoke via slash command (/orient). Do not trigger automatically. 4 - license: CC-BY-4.0 5 - --- 6 - 7 - # Create Orientation 8 - 9 - ## Purpose 10 - 11 - Generate a repo-specific `orientation.md` file inside the `learning-opportunities` skill's `resources/` directory. This file is used by that skill when invoked as `/learning-opportunities orientation` to run a structured learning exercise for someone new to the codebase. 12 - 13 - --- 14 - 15 - ## Step 1: Find where to write orientation.md 16 - 17 - Always write to the **project level**, regardless of where the `learning-opportunities` skill is installed: 18 - 19 - ``` 20 - .claude/skills/learning-opportunities/resources/orientation.md 21 - ``` 22 - 23 - (relative to the current working directory) 24 - 25 - If the directory `.claude/skills/learning-opportunities/resources/` does not exist, create it. If it already exists, leave it and any files inside it untouched — only write `orientation.md`. 26 - 27 - This keeps orientation files co-located with the repo they describe — they can be committed to version control, shared with teammates, and never collide across projects. 28 - 29 - --- 30 - 31 - ## Argument check 32 - 33 - You were invoked with arguments: $ARGUMENTS 34 - 35 - If the argument is `showboat`, skip to the **Showboat Path** section below. 36 - 37 - Otherwise, continue with Steps 2-5 (the default path). 38 - 39 - --- 40 - 41 - ## Step 2: Detect the repo's primary language(s) 42 - 43 - Check for these manifest/config files at the project root and note all that exist. A repo may use multiple languages. 
44 - 45 - | Language | Signal files | 46 - |:-----------|:----------------------------------------------------------| 47 - | Python | `pyproject.toml`, `setup.py`, `setup.cfg`, `Pipfile`, `requirements.txt` | 48 - | JavaScript | `package.json` (no `tsconfig.json`) | 49 - | TypeScript | `package.json` + `tsconfig.json` | 50 - | R | `DESCRIPTION`, `NAMESPACE`, any `*.Rproj` | 51 - | Ruby | `Gemfile`, any `*.gemspec` | 52 - | Go | `go.mod` | 53 - | Rust | `Cargo.toml` | 54 - | C/C++ | `CMakeLists.txt`, `configure.ac`, root-level `Makefile` | 55 - | Java/Kotlin| `pom.xml`, `build.gradle`, `build.gradle.kts` | 56 - | C# | any `*.csproj` or `*.sln` | 57 - | Nix | `flake.nix`, `default.nix`, `shell.nix` | 58 - 59 - Record all detected languages. For each detected language, read its primary manifest file in full — it contains declared purpose, dependencies, entry points, and scripts/commands that are essential for orientation. 60 - 61 - --- 62 - 63 - ## Step 3: Explore the repo 64 - 65 - Use the following sequence, drawn from research on expert program comprehension strategies. Experts read **strategically and selectively**, not exhaustively. The goal is a mental model of structure, not line-by-line understanding. 66 - 67 - ### 3a. README and top-level docs 68 - Read `README.md`, `README.rst`, or `README` at the project root. Also check for a `docs/` directory — read its index or table of contents if present. This gives the stated purpose and intended audience. 69 - 70 - *Source: Spinellis, "Code Reading: The Open Source Perspective" (2003) — start with the build system and README before reading any application code.* 71 - 72 - ### 3b. Directory tree 73 - Run `find . -maxdepth 3 -not -path '*/.git/*' -not -path '*/node_modules/*' -not -path '*/__pycache__/*' -not -path '*/.venv/*'` to get the top-level structure. 
Read the directory tree as an architectural table of contents — naming conventions (`src/`, `lib/`, `tests/`, `cmd/`, `pkg/`) reveal intent before any code is read. 74 - 75 - *Source: Spinellis (2003) — "directory tree as table of contents."* 76 - 77 - ### 3c. Entry points 78 - Identify and read the main entry points based on detected language: 79 - 80 - - **Python**: `__main__.py`, `cli.py`, `main.py`, or the `[tool.poetry.scripts]` / `[project.scripts]` section of `pyproject.toml` 81 - - **JavaScript/TypeScript**: `main` field in `package.json`, `index.js`, `src/index.ts` 82 - - **Go**: files in `cmd/*/main.go` or root `main.go` 83 - - **Rust**: `src/main.rs` or `src/lib.rs` 84 - - **R**: `R/` directory, the `DESCRIPTION` file's `Imports` 85 - - **Ruby**: files in `bin/`, `lib/<gem-name>.rb` 86 - - **C/C++**: `main.c`, `main.cpp`, or the primary target in `CMakeLists.txt` 87 - - **Nix**: `flake.nix` outputs, `default.nix` top-level attributes 88 - 89 - *Source: Hermans, "The Programmer's Brain" (2021, Manning) — follow the entry point and call graph one level at a time.* 90 - 91 - ### 3d. Test files 92 - Read 2-3 test files, prioritizing integration or end-to-end tests over unit tests. Tests are executable specifications — reading test names and assertions is one of the fastest ways to understand what a module is meant to do. 93 - 94 - *Source: Storey et al., "How Software Developers Use Tools, Cognitive Strategies, and Representations to Navigate Code" (IEEE TSE, 2006) — use the test suite as a specification.* 95 - 96 - ### 3e. Core modules 97 - Identify the 5-8 most important source files based on what you have learned. Read their top-level structure (class/function names, imports, docstrings) without necessarily reading every implementation in full. 98 - 99 - ### 3f. Recent git history (if git is available) 100 - Run `git log --oneline -20` to see recent activity. 
Run `git log --format="%f" | sort | uniq -c | sort -rn | head -10` to identify the most-edited files. High-churn files are usually the core of the system. 101 - 102 - *Source: Spolsky practitioner writing — "find the biggest, most-edited file; read git history to understand why code is the way it is."* 103 - 104 - --- 105 - 106 - ## Step 4: Synthesize and write orientation.md 107 - 108 - Write the file to the path identified in Step 1. Use this exact structure: 109 - 110 - ```markdown 111 - # Repo Orientation: [repo name] 112 - 113 - > Generated by /orient. Re-run to update. 114 - 115 - ## One-line purpose 116 - [Single sentence: what this repo does and why it exists. Written for someone with no prior context.] 117 - 118 - ## Primary language(s) 119 - [List languages detected, with the dominant one first.] 120 - 121 - ## Pipeline / workflow stages 122 - [Ordered list of the main stages data or requests flow through. One line each. If the repo has no pipeline, describe the main modules and their relationships instead.] 123 - 124 - ## Key files 125 - [6-10 entries in this format:] 126 - - `path/to/file.py` — [what it does] | [why a new developer should read it] 127 - 128 - ## Core concepts 129 - [3-5 domain or architectural concepts essential to working in this codebase. For each:] 130 - **[Concept name]**: [Plain-English definition. Where in the code it lives.] 131 - 132 - ## Common gotchas 133 - [2-3 things that commonly trip up new developers. Be specific — reference actual file paths or function names.] 134 - 135 - ## Suggested exercise sequence 136 - [EXACTLY 2 exercises. These are orientation exercises — their job is to build a high-level mental model of the repo, not to drill implementation details. 137 - 138 - Orientation exercises follow this pattern: direct the learner to read one specific, short artifact first, then ask them to synthesize or explain what they just read. 
Never ask them to predict something they couldn't know without reading — the goal is comprehension and synthesis, not prior knowledge. 139 - 140 - Good orientation exercises: 141 - - "Open README.md and read the Features section. Then close it and explain to a non-developer what this tool produces and why someone would use it." 142 - - "Open `models.py`. Find the dataclass that represents everything the pipeline produces for one audio file. What fields does it have, and what does that tell you about the pipeline's stages?" 143 - - "Open `config/default.yaml` and skim it. What are the two or three settings you'd most likely need to change for a new project, and why?" 144 - 145 - Bad orientation exercises (save these for later sessions): 146 - - "Without opening any files, predict the pipeline stages" — learner has no basis for this 147 - - Predicting specific function outputs, column names, or algorithmic behavior 148 - - Tracing through individual method implementations 149 - - Debugging specific logic (e.g. merge suffix behavior, metadata propagation) 150 - 151 - For each exercise, specify: the exact file to open, what to read, and what synthesis question to answer after reading.] 152 - 153 - ## Sources consulted 154 - [List the files and paths you actually read while generating this file.] 155 - ``` 156 - 157 - Keep each section concise. This is a teaching scaffold, not documentation. Prioritize clarity over completeness. 158 - 159 - --- 160 - 161 - ## Step 5: Confirm to the user 162 - 163 - > **Note for skill maintainers**: Academic and practitioner sources for the exploration methodology in Steps 3a-3f are documented in [resources/orient-bibliography.md](resources/orient-bibliography.md). Load that file only if you need to update or cite sources — it is not needed during normal skill execution. 
164 - 165 - Tell the user: 166 - - Where the file was written 167 - - How many key files and concepts were identified 168 - - How to use it: `/learning-opportunities orient` 169 - - That they can re-run `/orient` at any time to regenerate it as the codebase evolves 170 - 171 - --- 172 - 173 - ## Showboat Path 174 - 175 - This path replaces Steps 2-5 when the argument is `showboat`. It produces `orientation.md` at the same location identified in Step 1, but uses the `showboat` CLI tool (via `uvx`) to build a detailed, linear code walkthrough. 176 - 177 - ### Showboat Step 1: Check for uv 178 - 179 - Run `command -v uv` to verify that `uv` is installed. 180 - 181 - If `uv` is not found, tell the user: 182 - 183 - > `uv` is required for showboat mode but was not found on your PATH. 184 - > Install it from: https://docs.astral.sh/uv/getting-started/installation/ 185 - 186 - Then stop — do not proceed further. 187 - 188 - ### Showboat Step 2: Read the repo and plan the document 189 - 190 - Read the repo to understand its structure, purpose, and key code paths. Then plan a linear walkthrough document with: 191 - 192 - - A title and table of contents 193 - - Commentary sections that explain the codebase narratively, in reading order 194 - - A Code Listings appendix containing the actual code snippets referenced by commentary 195 - - A suggested exercise sequence (same criteria as Step 4's exercise requirements — exactly 2 orientation exercises) 196 - 197 - Plan all section headings, code snippets, and sequential listing numbers **upfront before writing anything**. Each listing gets a sequential number (Listing 1, Listing 2, etc.) and a short description. 198 - 199 - ### Showboat Step 3: Learn the showboat tool 200 - 201 - Run `uvx showboat --help` to learn the available commands and their syntax. 202 - 203 - ### Showboat Step 4: Build orientation.md using showboat commands 204 - 205 - Use the showboat CLI to build the file. 
The output path is the same `orientation.md` from Step 1. Execute commands in this order: 206 - 207 - #### 4a. Initialize the document 208 - 209 - ``` 210 - uvx showboat init <path-to-orientation.md> "<Title>" 211 - ``` 212 - 213 - Then add a table of contents via `uvx showboat note`. 214 - 215 - #### 4b. Write all commentary sections 216 - 217 - Add each commentary section using `uvx showboat note`. Follow these rules for note content: 218 - 219 - - **No fenced code blocks** inside notes — use inline backtick code (`` `like_this` ``) instead 220 - - Reference code listings with inline links: `*([Listing N: description](#listing-N))*` 221 - - Write narratively — explain *why* the code is structured this way, not just *what* it does 222 - 223 - #### 4c. Write the Code Listings appendix 224 - 225 - For each listing planned in Showboat Step 2: 226 - 227 - 1. Add an anchor note: `uvx showboat note` with a heading like `### Listing N: description` and an HTML anchor `<a id="listing-N"></a>` 228 - 2. Add the code via `uvx showboat exec` to capture the actual file content (e.g., using `cat` or `sed` to extract the relevant lines) 229 - 230 - #### 4d. Append suggested exercise sequence 231 - 232 - Add a final section via `uvx showboat note` with exactly 2 orientation exercises. These follow the same criteria as the default path's Step 4: 233 - 234 - - Direct the learner to read one specific, short artifact first 235 - - Then ask them to synthesize or explain what they just read 236 - - Never ask them to predict something they couldn't know without reading 237 - - Specify: the exact file to open, what to read, and what synthesis question to answer 238 - 239 - #### 4e. Verify the document 240 - 241 - Run: 242 - 243 - ``` 244 - uvx showboat verify <path-to-orientation.md> 245 - ``` 246 - 247 - Fix any issues reported before proceeding. 
248 - 249 - ### Showboat Step 5: Confirm to the user 250 - 251 - Tell the user: 252 - 253 - - Where the file was written 254 - - That it was generated using showboat mode (a linear code walkthrough) 255 - - How to use it: `/learning-opportunities orient` 256 - - That they can re-run `/orient showboat` at any time to regenerate it
-39
home/profiles/opencode/skills/orient/resources/orient-bibliography.md
··· 1 - # Bibliography: Program Comprehension and Codebase Orientation 2 - 3 - Sources for the exploration methodology used in the orient skill (Steps 3a-3f of SKILL.md). 4 - 5 - --- 6 - 7 - Spinellis, D. (2003). *Code Reading: The Open Source Perspective*. Addison-Wesley. https://www.spinellis.gr/codereading/ 8 - 9 - - Basis for: start with the build system and README; read the directory tree as an architectural table of contents; identify architectural seams between components. 10 - 11 - --- 12 - 13 - Hermans, F. (2021). *The Programmer's Brain*. Manning Publications. https://www.manning.com/books/the-programmers-brain 14 - 15 - - Basis for: follow the entry point and call graph one level at a time; use "beacons" (recognizable patterns) to orient quickly; experts sample strategically rather than reading exhaustively. 16 - 17 - --- 18 - 19 - Storey, M.-A., et al. (2006). How Software Developers Use Tools, Cognitive Strategies, and Representations to Navigate Code. *IEEE Transactions on Software Engineering*. https://ieeexplore.ieee.org/document/1579228 20 - 21 - - Basis for: use the test suite as an executable specification; alternate between systematic and opportunistic traversal; follow data flow, not just control flow. 22 - 23 - --- 24 - 25 - Storey, M.-A., Zimmermann, T., Bird, C., Czerwonka, J., Murphy, B., & Kalliamvakou, E. (2021). Towards a Theory of Software Developer Job Satisfaction and Perceived Productivity. *IEEE Transactions on Software Engineering*, 47(10), 2125-2142. https://ieeexplore.ieee.org/document/8851296 26 - 27 - - Basis for: developer productivity is not separable from social and psychological context; orientation practices that ignore belonging and autonomy will underperform. 28 - 29 - --- 30 - 31 - Spolsky, J. Practitioner writing on reading unfamiliar codebases. https://www.joelonsoftware.com 32 - 33 - - Basis for: read git history to understand why code is the way it is; high-churn files are usually the core of the system. 
34 - 35 - --- 36 - 37 - Dagenais, B., et al. (2010). Moving into a New Software Project: Strangers in a Strange Land. *ICSE 2010*. https://dl.acm.org/doi/10.1145/1806799.1806842 38 - 39 - - Basis for: every codebase has a "gateway artifact" that once understood makes everything else legible; prioritize runnable examples over documentation.
+4 -3
hosts/darwin/casks/default.nix
··· 1 - { ... }: 1 + { ... }: 2 2 3 3 { 4 4 homebrew.enable = true; ··· 22 22 "wireshark" 23 23 "1password" 24 24 "orbstack" 25 - # "aerospace" 25 + "scoop" 26 + "aerospace" 26 27 27 28 # Communication Tools 28 29 # Already installed manually 29 30 # "loom" 30 - # "slack" 31 + # "slack" 31 32 # "zoom" 32 33 # "firefox" 33 34 # "1password-cli"
-1
hosts/darwin/default.nix
··· 121 121 system = { 122 122 stateVersion = 4; 123 123 primaryUser = "anishlakhwara"; # required for newer nix-darwin 124 - 125 124 keyboard = { 126 125 enableKeyMapping = true; 127 126 };