There are plenty of GUI download managers, but I wanted a simple one that I can call from the command line.
With the newish dotnet run app.cs style, it became easier to do this.
Here is the full source code. Make sure the file is executable, and then you can run it with downloader.cs https://example.com/someLargeFile.iso and it will download the file, in parts, to the current directory, then it stitches them together. If you have to cancel a download, it will resume where it left off.

1#!/usr/bin/dotnet run
2
3using System.Diagnostics;
4using System.Net.Http.Headers;
5
6const int DefaultChunks = 8;
7const int MaxRetries = 5;
8const int RetryDelayMs = 1000;
9const int ProgressUpdateMs = 250;
10
11if (args.Length < 1)
12{
13 Console.Error.WriteLine("Usage: dotnet run downloader.cs <url> [output-file] [chunks]");
14 Console.Error.WriteLine(" url - URL to download");
15 Console.Error.WriteLine(" output-file - Output filename (default: derived from URL)");
16 Console.Error.WriteLine(" chunks - Number of parallel streams (default: 8)");
17 return 1;
18}
19
20var url = args[0];
21var outputFile = args.Length > 1 ? args[1] : Path.GetFileName(new Uri(url).LocalPath);
22var chunks = args.Length > 2 ? int.Parse(args[2]) : DefaultChunks;
23
24if (string.IsNullOrWhiteSpace(outputFile) || outputFile == "/")
25 outputFile = "download";
26
27Console.WriteLine($"URL: {url}");
28Console.WriteLine($"Output: {outputFile}");
29Console.WriteLine($"Streams: {chunks}");
30Console.WriteLine();
31
32using var client = new HttpClient { Timeout = TimeSpan.FromMinutes(30) };
33client.DefaultRequestHeaders.UserAgent.ParseAdd("DownloaderCLI/1.0");
34
35// Probe the server for content-length and range support
36using var headReq = new HttpRequestMessage(HttpMethod.Head, url);
37using var headResp = await client.SendAsync(headReq);
38headResp.EnsureSuccessStatusCode();
39
40var totalSize = headResp.Content.Headers.ContentLength ?? -1;
41var acceptRanges = headResp.Headers.Contains("Accept-Ranges")
42 && headResp.Headers.GetValues("Accept-Ranges").Any(v => v.Contains("bytes", StringComparison.OrdinalIgnoreCase));
43
44if (totalSize <= 0 || !acceptRanges)
45{
46 Console.WriteLine(totalSize <= 0
47 ? "Server did not report content length - falling back to single-stream download."
48 : "Server does not support range requests - falling back to single-stream download.");
49 Console.WriteLine();
50 await SingleStreamDownload(client, url, outputFile);
51 return 0;
52}
53
54Console.WriteLine($"Size: {FormatBytes(totalSize)}");
55Console.WriteLine($"Ranges: supported");
56Console.WriteLine();
57
58var sw = Stopwatch.StartNew();
59var chunkInfos = BuildChunks(totalSize, chunks);
60var progress = new long[chunkInfos.Count];
61
62// Progress reporter
63using var cts = new CancellationTokenSource();
64var progressTask = Task.Run(async () =>
65{
66 while (!cts.Token.IsCancellationRequested)
67 {
68 PrintProgress(progress, chunkInfos, totalSize, sw.Elapsed);
69 try { await Task.Delay(ProgressUpdateMs, cts.Token); } catch (TaskCanceledException) { break; }
70 }
71});
72
73// Download all chunks in parallel
74var tempFiles = new string[chunkInfos.Count];
75var downloadTasks = new Task[chunkInfos.Count];
76
77for (int i = 0; i < chunkInfos.Count; i++)
78{
79 var idx = i;
80 var (start, end) = chunkInfos[idx];
81 tempFiles[idx] = $"{outputFile}.part{idx}";
82 downloadTasks[idx] = DownloadChunk(client, url, start, end, tempFiles[idx], progress, idx);
83}
84
85await Task.WhenAll(downloadTasks);
86
87cts.Cancel();
88await progressTask;
89PrintProgress(progress, chunkInfos, totalSize, sw.Elapsed);
90Console.WriteLine();
91Console.WriteLine();
92
93// Reassemble
94Console.Write("Reassembling... ");
95await Reassemble(tempFiles, outputFile);
96Console.WriteLine("done.");
97
98// Cleanup temp files
99foreach (var f in tempFiles)
100 if (File.Exists(f)) File.Delete(f);
101
102sw.Stop();
103var info = new FileInfo(outputFile);
104Console.WriteLine($"Completed in {sw.Elapsed.TotalSeconds:F1}s - {FormatBytes(info.Length)} @ {FormatBytes((long)(info.Length / sw.Elapsed.TotalSeconds))}/s");
105return 0;
106
107// ---- helper methods ----
108
/// <summary>
/// Splits [0, totalSize) into <paramref name="count"/> contiguous, inclusive
/// byte ranges; the last range absorbs the remainder of the integer division.
/// </summary>
/// <param name="totalSize">Total number of bytes to download; expected &gt; 0.</param>
/// <param name="count">Requested number of chunks.</param>
/// <returns>List of (Start, End) inclusive byte offsets covering the whole file.</returns>
static List<(long Start, long End)> BuildChunks(long totalSize, int count)
{
    // FIX: clamp the chunk count. count <= 0 previously divided by zero, and
    // count > totalSize produced zero-length chunks with start > end, which
    // become invalid Range headers downstream.
    if (count < 1) count = 1;
    if (totalSize > 0 && count > totalSize) count = (int)totalSize;

    var chunkSize = totalSize / count;
    var result = new List<(long, long)>(count);
    for (int i = 0; i < count; i++)
    {
        var start = i * chunkSize;
        // Last chunk runs to the final byte so no remainder is dropped.
        var end = (i == count - 1) ? totalSize - 1 : start + chunkSize - 1;
        result.Add((start, end));
    }
    return result;
}
121
/// <summary>
/// Downloads bytes [start, end] of <paramref name="url"/> into
/// <paramref name="tempFile"/>, publishing the running byte count into
/// <c>progress[index]</c>. Retries up to MaxRetries with linear backoff,
/// resuming from whatever the part file already contains (from a previous
/// attempt or a previous run). The exception from the final failed attempt
/// propagates to the caller.
/// </summary>
static async Task DownloadChunk(HttpClient client, string url, long start, long end,
    string tempFile, long[] progress, int index)
{
    for (int attempt = 1; attempt <= MaxRetries; attempt++)
    {
        try
        {
            // Resume from where we left off if retrying.
            long existingBytes = 0;
            if (File.Exists(tempFile))
            {
                existingBytes = new FileInfo(tempFile).Length;
                if (existingBytes >= end - start + 1)
                {
                    progress[index] = end - start + 1;
                    return; // already complete
                }
            }

            using var req = new HttpRequestMessage(HttpMethod.Get, url);
            req.Headers.Range = new RangeHeaderValue(start + existingBytes, end);

            using var resp = await client.SendAsync(req, HttpCompletionOption.ResponseHeadersRead);
            resp.EnsureSuccessStatusCode();

            // FIX: a server that ignores the Range header answers 200 OK with the
            // ENTIRE file. Appending that to a partial chunk silently corrupts the
            // reassembled output, so only 206 Partial Content is acceptable here.
            if (resp.StatusCode != System.Net.HttpStatusCode.PartialContent)
                throw new InvalidOperationException(
                    $"Expected 206 Partial Content for ranged request, got {(int)resp.StatusCode}.");

            await using var stream = await resp.Content.ReadAsStreamAsync();
            await using var fs = new FileStream(tempFile, existingBytes > 0 ? FileMode.Append : FileMode.Create,
                FileAccess.Write, FileShare.None, 81920, useAsync: true);

            var buffer = new byte[81920];
            long downloaded = existingBytes;
            int bytesRead;

            while ((bytesRead = await stream.ReadAsync(buffer)) > 0)
            {
                await fs.WriteAsync(buffer.AsMemory(0, bytesRead));
                downloaded += bytesRead;
                // Publish with a full-fence write so the progress reporter
                // thread sees up-to-date values.
                Interlocked.Exchange(ref progress[index], downloaded);
            }

            return; // success
        }
        catch (Exception ex) when (attempt < MaxRetries)
        {
            Console.Error.WriteLine($"\n [chunk {index}] attempt {attempt} failed: {ex.Message} - retrying...");
            await Task.Delay(RetryDelayMs * attempt); // linear backoff
        }
    }
}
171
/// <summary>
/// Downloads the whole resource in a single GET, streaming it to
/// <paramref name="outputFile"/> with a periodic progress line. Used when the
/// server reports no content length or no range support.
/// </summary>
static async Task SingleStreamDownload(HttpClient client, string url, string outputFile)
{
    var sw = Stopwatch.StartNew();
    using var resp = await client.GetAsync(url, HttpCompletionOption.ResponseHeadersRead);
    resp.EnsureSuccessStatusCode();

    var total = resp.Content.Headers.ContentLength ?? -1; // -1 = length unknown
    await using var stream = await resp.Content.ReadAsStreamAsync();
    await using var fs = new FileStream(outputFile, FileMode.Create, FileAccess.Write, FileShare.None, 81920, true);

    var buffer = new byte[81920];
    long downloaded = 0;
    int bytesRead;
    var lastUpdate = DateTimeOffset.UtcNow;

    while ((bytesRead = await stream.ReadAsync(buffer)) > 0)
    {
        await fs.WriteAsync(buffer.AsMemory(0, bytesRead));
        downloaded += bytesRead;

        if ((DateTimeOffset.UtcNow - lastUpdate).TotalMilliseconds >= ProgressUpdateMs)
        {
            lastUpdate = DateTimeOffset.UtcNow;
            var pct = total > 0 ? (double)downloaded / total * 100 : 0;
            // FIX: guard against a near-zero elapsed time, which made the
            // bytes/sec division yield Infinity and render as garbage.
            var seconds = Math.Max(sw.Elapsed.TotalSeconds, 0.001);
            var speed = downloaded / seconds;
            Console.Write($"\r [{pct,5:F1}%] {FormatBytes(downloaded)}{(total > 0 ? $" / {FormatBytes(total)}" : "")} {FormatBytes((long)speed)}/s ");
        }
    }

    sw.Stop();
    // Same guard on the final summary line for instantly-completed downloads.
    var totalSeconds = Math.Max(sw.Elapsed.TotalSeconds, 0.001);
    Console.WriteLine($"\r [100.0%] {FormatBytes(downloaded)} {FormatBytes((long)(downloaded / totalSeconds))}/s - done. ");
}
204
/// <summary>
/// Concatenates the downloaded part files, in array order, into the final
/// output file (created fresh, truncating any existing file).
/// </summary>
static async Task Reassemble(string[] parts, string outputFile)
{
    await using var destination = new FileStream(
        outputFile, FileMode.Create, FileAccess.Write, FileShare.None, 81920, true);

    for (int partIndex = 0; partIndex < parts.Length; partIndex++)
    {
        await using var source = new FileStream(
            parts[partIndex], FileMode.Open, FileAccess.Read, FileShare.Read, 81920, true);
        await source.CopyToAsync(destination);
    }
}
214
/// <summary>
/// Repaints a single console status line: overall percentage, bytes done /
/// total, transfer speed, ETA, and an 8-cell mini progress bar per chunk.
/// </summary>
static void PrintProgress(long[] progress, List<(long Start, long End)> chunks, long totalSize, TimeSpan elapsed)
{
    long doneBytes = 0;
    foreach (var chunkBytes in progress)
        doneBytes += chunkBytes;

    var percent = (double)doneBytes / totalSize * 100;
    var bytesPerSec = elapsed.TotalSeconds > 0 ? doneBytes / elapsed.TotalSeconds : 0;
    var eta = bytesPerSec > 0 ? TimeSpan.FromSeconds((totalSize - doneBytes) / bytesPerSec) : TimeSpan.Zero;

    // Build one mini bar per chunk; any progress at all shows at least one
    // filled cell so active chunks are visibly distinct from untouched ones.
    const int miniBarWidth = 8;
    var bars = new string[chunks.Count];
    for (int i = 0; i < chunks.Count; i++)
    {
        var span = chunks[i].End - chunks[i].Start + 1;
        var fraction = (double)progress[i] / span;
        int filled;
        if (fraction <= 0)
            filled = 0;
        else
            filled = Math.Clamp((int)Math.Ceiling(fraction * miniBarWidth), 1, miniBarWidth);
        bars[i] = new string('█', filled) + new string('░', miniBarWidth - filled);
    }

    Console.Write($"\r [{percent,5:F1}%] {FormatBytes(doneBytes)} / {FormatBytes(totalSize)} " +
        $"{FormatBytes((long)bytesPerSec)}/s ETA {eta:mm\\:ss} " +
        $"[{string.Join('|', bars)}] ");
}
239
// Formats a byte count as a human-readable string with one decimal place,
// stepping through B/KB/MB/GB/TB in factors of 1024 (e.g. 1536 -> "1.5 KB").
// NOTE(review): {val:F1} uses the current culture's decimal separator —
// confirm that's intended for console output.
240static string FormatBytes(long bytes)
241{
242 string[] units = ["B", "KB", "MB", "GB", "TB"];
243 double val = bytes;
244 int unit = 0;
245 while (val >= 1024 && unit < units.Length - 1) { val /= 1024; unit++; }
246 return $"{val:F1} {units[unit]}";
247}Let me know if this is of use to you.

I keep running into a version of the same question when talking about AI agent design: if you have good enough skills — detailed procedural knowledge in markdown files — do you even need MCP servers and other tools?
No. You absolutely still need tools. But the question itself reveals a misunderstanding about what skills actually are, and I think it’s worth unpacking.
Skills and tools are not competing approaches. You can’t replace one with the other. In practice, they’re deeply intertwined — and trying to pit them against each other misses the entire point of both.
I’m going to use RockBot as my example throughout this post because it’s what I’m building and I know it best, but these concepts are not specific to RockBot. Claude Code has its CLAUDE.md files and tool use. GitHub Copilot has instruction files, skills, and MCP integration. Cursor, Windsurf, and other AI coding agents all have some form of this pattern. The relationship between tools and skills is a fundamental design concern for any AI agent, not a feature of any one product.
I’ve written about RockBot’s tools and RockBot’s skills separately, so I won’t rehash everything here. The short version:
Tools are functions the agent can call to take action in the world. Send an email, check a calendar, search the web, invoke an A2A agent, store a memory. Without tools, an agent can only chat. A skill file cannot send an email. A skill file cannot look up what’s on your calendar. Tools are how agents do things.
Skills are markdown files that capture procedural, context-specific knowledge the agent has built up over time. They encode lessons from past failures, successful patterns, environment-specific conventions, and — critically — knowledge about how to use tools well.
That last point is the one people miss. Knowing that a hammer exists is different from knowing how to drive a nail without splitting the wood. The hammer is the tool. The technique is the skill. You need both.
In RockBot, the relationship between tools and skills isn’t just conceptual — it’s built into the architecture.
Every tool subsystem in the RockBot framework can register a base-level tool guide when it starts up. This is a default skill that the subsystem itself provides, describing how its tools should be used. When the MCP integration subsystem loads, it registers a guide explaining how mcp_list_services, mcp_get_service_details, and mcp_invoke_tool work together. The A2A subsystem does the same for agent-to-agent communication. The web subsystem explains how search and browsing tools relate. Memory, scheduling, subagents — each subsystem brings its own guide.
The agent uses list_tool_guides and get_tool_guide to discover and retrieve these guides. On day one, before any learning has happened, the agent already has grounded knowledge about how to use its tools — not just what they are, but how to use them effectively.
So right from the start, tools and skills are coupled. The tools arrive with skills already attached.
Those base-level tool guides are a starting point, not a ceiling.
As the agent uses its tools across real interactions, it learns. It discovers edge cases, finds better sequences, encounters caveats that weren’t obvious from the schema alone. Through RockBot’s feedback loop — explicit thumbs up/down from users and implicit correction signals from conversations — the agent refines and extends its skills.
I have a great real-world example of this. A while back, RockBot kept creating calendar events at the wrong time. It would send 4 PM Central to the calendar MCP server, and the event would show up at 11 AM. Four times in a row. It turned out the MCP server had a bug where it silently ignored the timezone parameter and treated all times as UTC.
The tool guide for the calendar MCP server didn’t mention this problem — because it didn’t exist when the guide was written. But after that painful debugging session, the agent learned the workaround (send UTC times directly), and that knowledge was captured as an updated skill. The next time the agent scheduled something, it didn’t make the same mistake. That learning was entirely dependent on having the tool in the first place. You can’t learn to work around a calendar bug if you don’t have a calendar.
That’s the pattern. The skill describing how to use the calendar MCP server on day one is fairly generic. After weeks of actual calendar management, that skill becomes precise: how to handle recurring events, what to do when attendee time zones differ, what the server does and doesn’t support. The agent has learned by doing, and the skill has grown because of it.
I want to be clear that skills aren’t only about tool usage. Skills capture all sorts of procedural knowledge: how to structure a research delegation, what tone to use with different contacts, how to format reports. Many skills have nothing to do with specific tools.
But a large and important subset of skills exist specifically to make tool usage more effective. And that’s the insight I think gets lost when people frame this as “tools vs. skills”: skills aren’t an alternative to tools. They’re a multiplier on tools.
Skills are operational knowledge — knowledge about tools, for tools, refined through using tools. They don’t sit above the tool layer in the architecture. They sit right alongside it, making it work better.
What RockBot demonstrates is that these two concepts work in concert at every level:
Tools provide capability. They are the agent’s connection to the real world — email, calendars, file storage, web, other agents.
Tool guides provide starting knowledge. Each subsystem ships with a skill that grounds the agent from the moment tools become available. The agent never has to figure out a subsystem entirely from scratch.
Experience improves that knowledge over time. As the agent uses tools, encounters failures, receives feedback, and discovers edge cases, skills get richer and more precise. Tool usage becomes more effective and more reliable.
Remove the tools and you have an agent that can describe how things should work but can’t actually do anything. Remove the skills and you have an agent that stumbles through every interaction, making the same mistakes over and over because nothing it learns ever sticks.
Together? You get an agent that keeps getting better at its job.
And again — this isn’t a RockBot-specific insight. Whether you’re configuring GitHub Copilot with custom instructions and MCP servers, setting up Claude Code with CLAUDE.md files and tool access, or building your own agent framework from scratch, the same principle applies. Tools give your agent the ability to act. Skills give it the knowledge to act well. Invest in both.
Final lesson, and probably my favorite.
We move from a copilot with tools to a system that also uses agents as composable specialists.
If you’re following along on your own, complete lesson 0 and lesson 7 first.
This lesson does not require a brand-new Azure resource, but it does add framework dependencies and orchestrates all prior components.
Once these checks pass, add the agent orchestration code. Debugging is much easier when the underlying tools are already healthy.
The workshop frames agents as “LIT”:
Copilot is the user-facing conversational surface.
Agents are focused components that can be called by the copilot (or by other agents) to do bounded jobs.
The initial StoryAgent is created with Microsoft Agent Framework and exposed as an AI tool.
This already gives a big capability jump: users can ask for tailored stories while the core copilot remains clean.
Then the workshop introduces an “agents as tools” orchestration pattern:
StoryAgent creates the story. StorySummaryAgent extracts scene prompts. ImageGenerationAgent uses the image tool to generate visuals. StoryGenerationAgent supervises and returns the story + image URLs. This is a practical orchestration pipeline, not just an abstract demo.
This pattern scales well because each agent has:
You can improve one agent without rewriting the whole system.
It’s the same architectural principle as microservices, just in AI-native form.
A cinematic command-center scene with three specialized AI holograms (story writer, scene summarizer, image artist) collaborating under a supervising orchestration AI, producing a final illustrated story output. Epic space-opera style, high detail, no text, no logos.
Workshop source for this lesson: Lesson 8 README.
And that’s the full 8-part journey: chat, memory, model choice, tools, MCP, RAG, multimodal, and agents.
Note: Original workshop repository: jimbobbennett/StarWarsCopilot.
Lesson 7 brings multimodal capabilities into the stack.
Instead of only generating text, the copilot can now generate images via an MCP tool.
If you’re following along on your own, complete lesson 0 and lesson 6 first.
This lesson adds a dedicated Azure OpenAI image deployment.
gpt-image-1 or gpt-image-1.5).
dotnet user-secrets set "ImageGeneration:Endpoint" "https://<your-resource>.openai.azure.com/"
dotnet user-secrets set "ImageGeneration:APIKey" "<your-api-key>"
dotnet user-secrets set "ImageGeneration:ModelName" "<your-image-deployment-name>"
Tip: image generation can be expensive. Set a budget and test with a small number of prompts first.
Also worth noting: dall-e-3 has been retired in Azure OpenAI. Use gpt-image-1 or gpt-image-1.5 for new deployments.
The workshop adds GenerateStarWarsImageTool to the MCP server.
It:
gpt-image-1 series). Simple contract, high utility.
A really useful part of this lesson is how it handles content policy issues.
The first pass prompt nudges toward:
Then, if policy violations happen, the tool returns actionable retry guidance rather than a dead-end error.
That guidance tells the model to:
This is a great pattern for resilient AI systems: tools can teach the orchestrating model how to recover.
The system prompt is updated so if a tool asks for a retry, the assistant follows that instruction and retries.
This creates a self-healing loop:
That is much better UX than “sorry, failed”.
A vibrant cinematic artwork of an AI assistant projecting generated concept art panels in a starship studio, with one panel being refined into a safer descriptive prompt workflow. Retro space-opera vibe, rich colors, high detail, no text, no logos.
Workshop source for this lesson: Lesson 7 README.
Next up: building agents and a multi-agent story + image workflow.
Note: Original workshop repository: jimbobbennett/StarWarsCopilot.
By lesson 6, we already have an MCP server and client working.
Now we add a classic enterprise use case: retrieval from structured business data.
If you’re following along on your own, complete lesson 0 and lesson 5 first.
This lesson introduces a new Azure dependency: Storage Tables.
Figurines, Orders, OrderFigurines
dotnet user-secrets set "AzureStorage:ConnectionString" "<your-storage-connection-string>"
6-rag/dataloader you can run to populate sample data. Quick verification: confirm order 66 exists before testing StarWarsPurchaseTool.
A new MCP tool (StarWarsPurchaseTool) that queries Azure Table Storage to retrieve figurine order data.
The model can then answer questions like:
A lot of people hear “RAG” and think vector DB + embeddings.
This lesson is a good reminder that RAG simply means augmenting generation with retrieved data, and that retrieval source can be:
Here it’s plain table queries plus deterministic filtering logic.
The workshop uses three tables:
Figurines, Orders, and OrderFigurines. The tool accepts optional filters:
orderNumber, characterName, and customerName. It then combines the data into a JSON payload the model can reason over.
This is exactly the pattern I like for production tools: perform strict filtering in code, let the model focus on explanation and narrative.
The lesson adds:
The Azure.Data.Tables package. The case-sensitive lookup note in the workshop is also an important real-world reminder: retrieval quality starts with query normalization rules.
A high-detail sci-fi data-vault scene with holographic tables of orders and figurines floating above a console while an AI assistant correlates records into a clear response stream. Cool cyan and purple palette, cinematic lighting, no text, no logos.
Workshop source for this lesson: Lesson 6 README.
Next up: multi-modal AI with image generation tools.
Note: Original workshop repository: jimbobbennett/StarWarsCopilot.